From 6ee9151a8343b54cf37d99cd9a0994018c348f0e Mon Sep 17 00:00:00 2001
From: Jeff Hodges
Date: Fri, 20 Sep 2024 12:17:42 -0700
Subject: [PATCH] upgrade sops to v3.9.1 from v3.4.0

This upgrades sops from v3.4.0 to v3.9.1. A variety of corrections and
improvements have landed since the sops version we've been shipping.

The most obvious change is the new Go module path. A number of
deprecated, archived, and unused transitive dependencies were also
removed by this upgrade. Seeing autograph stuck on those unmaintained
dependencies (like the v1 AWS libraries) through the sops dependency was
the inspiration for this work. New transitive deps were added, per
usual.

This patch was created by replacing the sops imports in main.go with the
new module path, and then running:

```
$ go mod tidy
$ go mod vendor
```

A short sketch of the import change follows the file list below.
---
 go.mod | 115 +-
 go.sum | 567 +-
 main.go | 4 +-
 vendor/cloud.google.com/go/.gitignore | 12 +
 .../.release-please-manifest-individual.json | 18 +
 .../.release-please-manifest-submodules.json | 149 +
 .../go/.release-please-manifest.json | 3 +
 vendor/cloud.google.com/go/CHANGES.md | 2651 ++
 .../go/CODE_OF_CONDUCT.md} | 48 +-
 vendor/cloud.google.com/go/CONTRIBUTING.md | 364 +
 vendor/cloud.google.com/go/README.md | 86 +
 vendor/cloud.google.com/go/RELEASING.md | 141 +
 vendor/cloud.google.com/go/SECURITY.md | 7 +
 vendor/cloud.google.com/go/auth/CHANGES.md | 291 +
 .../go/auth}/LICENSE | 0
 vendor/cloud.google.com/go/auth/README.md | 40 +
 vendor/cloud.google.com/go/auth/auth.go | 614 +
 .../go/auth/credentials/compute.go | 86 +
 .../go/auth/credentials/detect.go | 262 +
 .../go/auth/credentials/doc.go | 45 +
 .../go/auth/credentials/filetypes.go | 225 +
 .../internal/externalaccount/aws_provider.go | 520 +
 .../externalaccount/executable_provider.go | 284 +
 .../externalaccount/externalaccount.go | 407 +
 .../internal/externalaccount/file_provider.go | 78 +
 .../internal/externalaccount/info.go | 74 +
 .../externalaccount/programmatic_provider.go | 30 +
 .../internal/externalaccount/url_provider.go | 88 +
 .../internal/externalaccount/x509_provider.go | 63 +
 .../externalaccountuser.go | 110 +
 .../go/auth/credentials/internal/gdch/gdch.go | 184 +
 .../internal/impersonate/impersonate.go | 146 +
 .../internal/stsexchange/sts_exchange.go | 161 +
 .../go/auth/credentials/selfsignedjwt.go | 85 +
 .../go/auth/grpctransport/dial_socketopt.go | 62 +
 .../go/auth/grpctransport/directpath.go | 126 +
 .../go/auth/grpctransport/grpctransport.go | 400 +
 .../go/auth/grpctransport/pool.go | 119 +
 .../go/auth/httptransport/httptransport.go | 226 +
 .../go/auth/httptransport/trace.go | 93 +
 .../go/auth/httptransport/transport.go | 231 +
 .../go/auth/internal/compute/compute.go | 66 +
 .../go/auth/internal/compute/manufacturer.go | 22 +
 .../internal/compute/manufacturer_linux.go | 23 +
 .../internal/compute/manufacturer_windows.go | 46 +
 .../go/auth/internal/credsfile/credsfile.go | 107 +
 .../go/auth/internal/credsfile/filetype.go | 157 +
 .../go/auth/internal/credsfile/parse.go | 98 +
 .../go/auth/internal/internal.go | 216 +
 .../go/auth/internal/jwt/jwt.go | 171 +
 .../go/auth/internal/transport/cba.go | 356 +
 .../internal/transport/cert/default_cert.go | 65 +
 .../transport/cert/enterprise_cert.go | 56 +
 .../transport/cert/secureconnect_cert.go | 124 +
 .../internal/transport/cert/workload_cert.go | 117 +
 .../go/auth/internal/transport/s2a.go | 134 +
 .../go/auth/internal/transport/transport.go | 105 +
 .../go/auth/oauth2adapt/CHANGES.md | 54 +
 .../go/auth/oauth2adapt/LICENSE} | 0
 .../go/auth/oauth2adapt/oauth2adapt.go | 164 +
.../cloud.google.com/go/auth/threelegged.go | 368 + .../go/compute/metadata/CHANGES.md | 59 + .../go/compute/metadata}/LICENSE | 2 +- .../go/compute/metadata/README.md | 27 + .../go/compute/metadata/metadata.go | 528 +- .../go/compute/metadata/retry.go | 114 + .../go/compute/metadata/retry_linux.go} | 25 +- .../go/compute/metadata/syscheck.go | 26 + .../go/compute/metadata/syscheck_linux.go} | 23 +- .../go/compute/metadata/syscheck_windows.go | 38 + vendor/cloud.google.com/go/debug.md | 398 + vendor/cloud.google.com/go/doc.go | 294 + vendor/cloud.google.com/go/go.work | 181 + vendor/cloud.google.com/go/go.work.sum | 91 + vendor/cloud.google.com/go/iam/CHANGES.md | 224 + vendor/cloud.google.com/go/iam/LICENSE | 202 + vendor/cloud.google.com/go/iam/README.md | 40 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 672 + .../go/iam/apiv1/iampb/options.pb.go | 187 + .../go/iam/apiv1/iampb/policy.pb.go | 1180 + vendor/cloud.google.com/go/iam/iam.go | 387 + .../go/internal/.repo-metadata-full.json | 2982 ++ vendor/cloud.google.com/go/internal/README.md | 29 + .../cloud.google.com/go/internal/annotate.go | 55 + .../cloud.google.com/go/internal/gen_info.sh | 46 + .../go/internal/optional/optional.go | 108 + vendor/cloud.google.com/go/internal/retry.go | 76 + .../go/internal/trace/trace.go | 275 + .../go/internal/version/update_version.sh} | 20 +- .../go/internal/version/version.go | 71 + vendor/cloud.google.com/go/kms/LICENSE | 202 + .../go/kms/apiv1/autokey_admin_client.go | 1271 + .../go/kms/apiv1/autokey_client.go | 1405 + .../go/kms/apiv1/auxiliary.go | 421 + .../go/kms/apiv1/auxiliary_go123.go | 69 + vendor/cloud.google.com/go/kms/apiv1/doc.go | 128 + .../go/kms/apiv1/ekm_client.go | 1723 + .../go/kms/apiv1/gapic_metadata.json | 715 + vendor/cloud.google.com/go/kms/apiv1/iam.go | 40 + .../go/kms/apiv1/key_management_client.go | 4532 +++ .../go/kms/apiv1/kmspb/autokey.pb.go | 872 + .../go/kms/apiv1/kmspb/autokey_admin.pb.go | 815 + .../go/kms/apiv1/kmspb/ekm_service.pb.go | 1965 + .../go/kms/apiv1/kmspb/resources.pb.go | 2902 ++ .../go/kms/apiv1/kmspb/service.pb.go | 7283 ++++ .../go/kms/apiv1/version.go} | 17 +- .../go/kms/internal/version.go | 18 + .../go/longrunning/CHANGES.md | 138 + .../cloud.google.com/go/longrunning/LICENSE | 202 + .../cloud.google.com/go/longrunning/README.md | 26 + .../go/longrunning/autogen/auxiliary.go | 69 + .../go/longrunning/autogen/auxiliary_go123.go | 32 + .../go/longrunning/autogen/doc.go | 117 + .../go/longrunning/autogen/from_conn.go | 30 + .../longrunning/autogen/gapic_metadata.json | 73 + .../go/longrunning/autogen/info.go | 24 + .../autogen/longrunningpb/operations.pb.go | 1230 + .../longrunning/autogen/operations_client.go | 875 + .../go/longrunning/longrunning.go | 182 + .../go/longrunning/tidyfix.go | 23 + vendor/cloud.google.com/go/migration.md | 50 + .../go/release-please-config-individual.json | 60 + ...elease-please-config-yoshi-submodules.json | 451 + .../go/release-please-config.json | 11 + vendor/cloud.google.com/go/storage/CHANGES.md | 602 + vendor/cloud.google.com/go/storage/LICENSE | 202 + vendor/cloud.google.com/go/storage/README.md | 32 + vendor/cloud.google.com/go/storage/acl.go | 345 + vendor/cloud.google.com/go/storage/bucket.go | 2388 ++ vendor/cloud.google.com/go/storage/client.go | 352 + vendor/cloud.google.com/go/storage/copy.go | 233 + vendor/cloud.google.com/go/storage/doc.go | 379 + .../go/storage/emulator_test.sh | 92 + .../go/storage/grpc_client.go | 2289 ++ vendor/cloud.google.com/go/storage/hmac.go | 356 + 
.../go/storage/http_client.go | 1486 + vendor/cloud.google.com/go/storage/iam.go | 133 + .../go/storage/internal/apiv2/auxiliary.go | 210 + .../go/storage/internal/apiv2/doc.go | 152 + .../internal/apiv2/gapic_metadata.json | 178 + .../storage/internal/apiv2/storage_client.go | 1919 + .../internal/apiv2/storagepb/storage.pb.go | 11796 ++++++ .../go/storage/internal/apiv2/version.go | 23 + .../go/storage/internal/version.go | 18 + vendor/cloud.google.com/go/storage/invoke.go | 157 + .../go/storage/notifications.go | 200 + vendor/cloud.google.com/go/storage/option.go | 80 + .../go/storage/post_policy_v4.go | 443 + vendor/cloud.google.com/go/storage/reader.go | 286 + vendor/cloud.google.com/go/storage/storage.go | 2450 ++ .../go/storage/storage.replay | 30067 ++++++++++++++++ vendor/cloud.google.com/go/storage/writer.go | 282 + vendor/cloud.google.com/go/testing.md | 237 + vendor/filippo.io/age/.gitattributes | 2 + vendor/filippo.io/age/AUTHORS | 6 + vendor/filippo.io/age/LICENSE | 27 + vendor/filippo.io/age/README.md | 271 + vendor/filippo.io/age/age.go | 271 + vendor/filippo.io/age/armor/armor.go | 187 + .../filippo.io/age/internal/bech32/bech32.go | 173 + .../filippo.io/age/internal/format/format.go | 311 + .../filippo.io/age/internal/stream/stream.go | 231 + vendor/filippo.io/age/parse.go | 84 + vendor/filippo.io/age/primitives.go | 72 + vendor/filippo.io/age/scrypt.go | 206 + vendor/filippo.io/age/x25519.go | 208 + .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 - .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 817 + .../azure-sdk-for-go/sdk/azcore/LICENSE.txt | 21 + .../azure-sdk-for-go/sdk/azcore/README.md | 39 + .../internal/resource/resource_identifier.go | 224 + .../arm/internal/resource/resource_type.go | 114 + .../sdk/azcore/arm/policy/policy.go | 108 + .../sdk/azcore/arm/runtime/pipeline.go | 70 + .../azcore/arm/runtime/policy_bearer_token.go | 146 + .../azcore/arm/runtime/policy_register_rp.go | 322 + .../arm/runtime/policy_trace_namespace.go | 30 + .../sdk/azcore/arm/runtime/runtime.go | 24 + .../Azure/azure-sdk-for-go/sdk/azcore/ci.yml | 29 + .../sdk/azcore/cloud/cloud.go | 44 + .../azure-sdk-for-go/sdk/azcore/cloud/doc.go | 53 + .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 173 + .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 264 + .../azure-sdk-for-go/sdk/azcore/errors.go | 14 + .../Azure/azure-sdk-for-go/sdk/azcore/etag.go | 57 + .../sdk/azcore/internal/exported/exported.go | 175 + .../sdk/azcore/internal/exported/pipeline.go | 77 + .../sdk/azcore/internal/exported/request.go | 260 + .../internal/exported/response_error.go | 167 + .../sdk/azcore/internal/log/log.go | 50 + .../azcore/internal/pollers/async/async.go | 159 + .../sdk/azcore/internal/pollers/body/body.go | 135 + .../sdk/azcore/internal/pollers/fake/fake.go | 133 + .../sdk/azcore/internal/pollers/loc/loc.go | 123 + .../sdk/azcore/internal/pollers/op/op.go | 150 + .../sdk/azcore/internal/pollers/poller.go | 24 + .../sdk/azcore/internal/pollers/util.go | 212 + .../sdk/azcore/internal/shared/constants.go | 44 + .../sdk/azcore/internal/shared/shared.go | 149 + .../azure-sdk-for-go/sdk/azcore/log/doc.go | 10 + .../azure-sdk-for-go/sdk/azcore/log/log.go | 55 + .../azure-sdk-for-go/sdk/azcore/policy/doc.go | 10 + .../sdk/azcore/policy/policy.go | 197 + .../sdk/azcore/runtime/doc.go | 10 + .../sdk/azcore/runtime/errors.go | 27 + .../sdk/azcore/runtime/pager.go | 137 + .../sdk/azcore/runtime/pipeline.go | 94 + .../sdk/azcore/runtime/policy_api_version.go | 75 + .../sdk/azcore/runtime/policy_bearer_token.go | 123 + 
.../azcore/runtime/policy_body_download.go | 72 + .../sdk/azcore/runtime/policy_http_header.go | 40 + .../sdk/azcore/runtime/policy_http_trace.go | 150 + .../azcore/runtime/policy_include_response.go | 35 + .../azcore/runtime/policy_key_credential.go | 64 + .../sdk/azcore/runtime/policy_logging.go | 264 + .../sdk/azcore/runtime/policy_request_id.go | 34 + .../sdk/azcore/runtime/policy_retry.go | 256 + .../azcore/runtime/policy_sas_credential.go | 55 + .../sdk/azcore/runtime/policy_telemetry.go | 83 + .../sdk/azcore/runtime/poller.go | 389 + .../sdk/azcore/runtime/request.go | 281 + .../sdk/azcore/runtime/response.go | 109 + .../runtime/transport_default_dialer_other.go | 15 + .../runtime/transport_default_dialer_wasm.go | 15 + .../runtime/transport_default_http_client.go | 48 + .../sdk/azcore/streaming/doc.go | 9 + .../sdk/azcore/streaming/progress.go | 89 + .../azure-sdk-for-go/sdk/azcore/to/doc.go | 9 + .../azure-sdk-for-go/sdk/azcore/to/to.go | 21 + .../sdk/azcore/tracing/constants.go | 41 + .../sdk/azcore/tracing/tracing.go | 191 + .../sdk/azidentity/.gitignore | 4 + .../sdk/azidentity/CHANGELOG.md | 575 + .../sdk/azidentity/LICENSE.txt | 21 + .../sdk/azidentity/MIGRATION.md | 307 + .../azure-sdk-for-go/sdk/azidentity/README.md | 258 + .../sdk/azidentity/TOKEN_CACHING.MD | 71 + .../sdk/azidentity/TROUBLESHOOTING.md | 241 + .../sdk/azidentity/assets.json | 6 + .../sdk/azidentity/authentication_record.go | 95 + .../sdk/azidentity/azidentity.go | 190 + .../sdk/azidentity/azure_cli_credential.go | 190 + .../azure_developer_cli_credential.go | 169 + .../azidentity/azure_pipelines_credential.go | 140 + .../azidentity/chained_token_credential.go | 138 + .../azure-sdk-for-go/sdk/azidentity/ci.yml | 46 + .../azidentity/client_assertion_credential.go | 85 + .../client_certificate_credential.go | 174 + .../azidentity/client_secret_credential.go | 75 + .../sdk/azidentity/confidential_client.go | 184 + .../azidentity/default_azure_credential.go | 165 + .../azidentity/developer_credential_util.go | 38 + .../sdk/azidentity/device_code_credential.go | 138 + .../sdk/azidentity/environment_credential.go | 167 + .../azure-sdk-for-go/sdk/azidentity/errors.go | 170 + .../azure-sdk-for-go/sdk/azidentity/go.work | 6 + .../sdk/azidentity/go.work.sum | 60 + .../interactive_browser_credential.go | 118 + .../sdk/azidentity/internal/exported.go | 18 + .../sdk/azidentity/internal/internal.go | 31 + .../sdk/azidentity/logging.go | 14 + .../azidentity/managed-identity-matrix.json | 17 + .../sdk/azidentity/managed_identity_client.go | 501 + .../azidentity/managed_identity_credential.go | 128 + .../sdk/azidentity/on_behalf_of_credential.go | 113 + .../sdk/azidentity/public_client.go | 273 + .../sdk/azidentity/test-resources-post.ps1 | 112 + .../sdk/azidentity/test-resources-pre.ps1 | 44 + .../sdk/azidentity/test-resources.bicep | 219 + .../username_password_credential.go | 90 + .../sdk/azidentity/version.go | 18 + .../sdk/azidentity/workload_identity.go | 131 + .../azure-sdk-for-go/sdk/internal/LICENSE.txt | 21 + .../sdk/internal/diag/diag.go | 51 + .../azure-sdk-for-go/sdk/internal/diag/doc.go | 7 + .../sdk/internal/errorinfo/doc.go | 7 + .../sdk/internal/errorinfo/errorinfo.go | 46 + .../sdk/internal/exported/exported.go | 129 + .../azure-sdk-for-go/sdk/internal/log/doc.go | 7 + .../azure-sdk-for-go/sdk/internal/log/log.go | 104 + .../sdk/internal/poller/util.go | 155 + .../sdk/internal/temporal/resource.go | 123 + .../azure-sdk-for-go/sdk/internal/uuid/doc.go | 7 + .../sdk/internal/uuid/uuid.go | 76 + 
.../sdk/security/keyvault/azkeys/CHANGELOG.md | 168 + .../sdk/security/keyvault/azkeys/LICENSE.txt | 21 + .../sdk/security/keyvault/azkeys/README.md | 147 + .../keyvault/azkeys/TROUBLESHOOTING.md | 4 + .../sdk/security/keyvault/azkeys/assets.json | 6 + .../sdk/security/keyvault/azkeys/autorest.md | 252 + .../sdk/security/keyvault/azkeys/build.go | 10 + .../sdk/security/keyvault/azkeys/ci.yml | 40 + .../sdk/security/keyvault/azkeys/client.go | 1418 + .../sdk/security/keyvault/azkeys/constants.go | 211 + .../security/keyvault/azkeys/custom_client.go | 68 + .../sdk/security/keyvault/azkeys/models.go | 428 + .../security/keyvault/azkeys/models_serde.go | 1123 + .../sdk/security/keyvault/azkeys/options.go | 131 + .../keyvault/azkeys/platform-matrix.json | 17 + .../keyvault/azkeys/response_types.go | 152 + .../keyvault/azkeys/test-resources-post.ps1 | 118 + .../keyvault/azkeys/test-resources.json | 296 + .../sdk/security/keyvault/azkeys/time_unix.go | 61 + .../sdk/security/keyvault/azkeys/version.go | 12 + .../security/keyvault/internal/CHANGELOG.md | 75 + .../security/keyvault/internal/LICENSE.txt | 21 + .../sdk/security/keyvault/internal/README.md | 21 + .../keyvault/internal/challenge_policy.go | 156 + .../keyvault/internal/ci.keyvault.yml | 28 + .../security/keyvault/internal/constants.go | 11 + .../sdk/security/keyvault/internal/doc.go | 7 + .../sdk/security/keyvault/internal/parse.go | 37 + .../keyvault/2016-10-01/keyvault/client.go | 6307 ---- .../keyvault/2016-10-01/keyvault/models.go | 2883 -- .../keyvault/2016-10-01/keyvault/version.go | 30 - .../Azure/go-autorest/autorest/LICENSE | 191 - .../Azure/go-autorest/autorest/adal/README.md | 292 - .../Azure/go-autorest/autorest/adal/config.go | 151 - .../go-autorest/autorest/adal/devicetoken.go | 269 - .../autorest/adal/go_mod_tidy_hack.go | 24 - .../go-autorest/autorest/adal/persist.go | 73 - .../Azure/go-autorest/autorest/adal/sender.go | 95 - .../Azure/go-autorest/autorest/adal/token.go | 1112 - .../go-autorest/autorest/adal/version.go | 45 - .../go-autorest/autorest/authorization.go | 336 - .../Azure/go-autorest/autorest/autorest.go | 150 - .../Azure/go-autorest/autorest/azure/async.go | 924 - .../go-autorest/autorest/azure/auth/auth.go | 737 - .../autorest/azure/auth/go_mod_tidy_hack.go | 24 - .../Azure/go-autorest/autorest/azure/azure.go | 326 - .../go-autorest/autorest/azure/cli/LICENSE | 191 - .../autorest/azure/cli/go_mod_tidy_hack.go | 24 - .../go-autorest/autorest/azure/cli/profile.go | 79 - .../go-autorest/autorest/azure/cli/token.go | 170 - .../autorest/azure/environments.go | 244 - .../autorest/azure/metadata_environment.go | 245 - .../Azure/go-autorest/autorest/azure/rp.go | 200 - .../Azure/go-autorest/autorest/client.go | 300 - .../Azure/go-autorest/autorest/date/LICENSE | 191 - .../Azure/go-autorest/autorest/date/date.go | 96 - .../autorest/date/go_mod_tidy_hack.go | 24 - .../Azure/go-autorest/autorest/date/time.go | 103 - .../go-autorest/autorest/date/timerfc1123.go | 100 - .../go-autorest/autorest/date/unixtime.go | 123 - .../go-autorest/autorest/date/utility.go | 25 - .../Azure/go-autorest/autorest/error.go | 98 - .../Azure/go-autorest/autorest/preparer.go | 550 - .../Azure/go-autorest/autorest/responder.go | 269 - .../go-autorest/autorest/retriablerequest.go | 52 - .../autorest/retriablerequest_1.7.go | 54 - .../autorest/retriablerequest_1.8.go | 66 - .../Azure/go-autorest/autorest/sender.go | 407 - .../Azure/go-autorest/autorest/to/LICENSE | 191 - .../Azure/go-autorest/autorest/to/convert.go | 152 - 
.../autorest/to/go_mod_tidy_hack.go | 24 - .../Azure/go-autorest/autorest/utility.go | 228 - .../go-autorest/autorest/validation/LICENSE | 191 - .../go-autorest/autorest/validation/error.go | 48 - .../autorest/validation/go_mod_tidy_hack.go | 24 - .../autorest/validation/validation.go | 400 - .../Azure/go-autorest/autorest/version.go | 41 - .../Azure/go-autorest/logger/LICENSE | 191 - .../Azure/go-autorest/logger/logger.go | 328 - .../Azure/go-autorest/tracing/LICENSE | 191 - .../Azure/go-autorest/tracing/tracing.go | 67 - .../LICENSE | 21 + .../apps/cache/cache.go | 54 + .../apps/confidential/confidential.go | 719 + .../apps/errors/error_design.md | 111 + .../apps/errors/errors.go | 89 + .../apps/internal/base/base.go | 477 + .../internal/base/internal/storage/items.go | 213 + .../internal/storage/partitioned_storage.go | 442 + .../internal/base/internal/storage/storage.go | 583 + .../apps/internal/exported/exported.go | 34 + .../apps/internal/json/design.md | 140 + .../apps/internal/json/json.go | 184 + .../apps/internal/json/mapslice.go | 333 + .../apps/internal/json/marshal.go | 346 + .../apps/internal/json/struct.go | 290 + .../apps/internal/json/types/time/time.go | 70 + .../apps/internal/local/server.go | 177 + .../apps/internal/oauth/oauth.go | 354 + .../oauth/ops/accesstokens/accesstokens.go | 457 + .../oauth/ops/accesstokens/apptype_string.go | 25 + .../internal/oauth/ops/accesstokens/tokens.go | 339 + .../internal/oauth/ops/authority/authority.go | 589 + .../ops/authority/authorizetype_string.go | 30 + .../internal/oauth/ops/internal/comm/comm.go | 320 + .../oauth/ops/internal/comm/compress.go | 33 + .../oauth/ops/internal/grant/grant.go | 17 + .../apps/internal/oauth/ops/ops.go | 56 + .../ops/wstrust/defs/endpointtype_string.go | 25 + .../wstrust/defs/mex_document_definitions.go | 394 + .../defs/saml_assertion_definitions.go | 230 + .../oauth/ops/wstrust/defs/version_string.go | 25 + .../ops/wstrust/defs/wstrust_endpoint.go | 199 + .../ops/wstrust/defs/wstrust_mex_document.go | 159 + .../internal/oauth/ops/wstrust/wstrust.go | 136 + .../apps/internal/oauth/resolvers.go | 149 + .../apps/internal/options/options.go | 52 + .../apps/internal/shared/shared.go | 72 + .../apps/internal/version/version.go | 8 + .../apps/public/public.go | 756 + .../ProtonMail/go-crypto}/AUTHORS | 2 +- .../ProtonMail/go-crypto}/CONTRIBUTORS | 2 +- .../go-crypto}/LICENSE | 0 .../github.com/ProtonMail/go-crypto/PATENTS | 22 + .../go-crypto/bitcurves/bitcurve.go | 381 + .../go-crypto/brainpool/brainpool.go | 134 + .../ProtonMail/go-crypto/brainpool/rcurve.go | 83 + .../ProtonMail/go-crypto/eax/eax.go | 162 + .../go-crypto/eax/eax_test_vectors.go | 58 + .../go-crypto/eax/random_vectors.go | 131 + .../go-crypto/internal/byteutil/byteutil.go | 90 + .../ProtonMail/go-crypto/ocb/ocb.go | 318 + .../go-crypto/ocb/random_vectors.go | 136 + .../ocb/rfc7253_test_vectors_suite_a.go | 78 + .../ocb/rfc7253_test_vectors_suite_b.go | 25 + .../go-crypto/openpgp/aes/keywrap/keywrap.go | 153 + .../go-crypto}/openpgp/armor/armor.go | 85 +- .../go-crypto}/openpgp/armor/encode.go | 77 +- .../go-crypto}/openpgp/canonical_text.go | 30 +- .../ProtonMail/go-crypto/openpgp/ecdh/ecdh.go | 287 + .../go-crypto/openpgp/ecdsa/ecdsa.go | 80 + .../go-crypto/openpgp/ed25519/ed25519.go | 115 + .../go-crypto/openpgp/ed448/ed448.go | 119 + .../go-crypto/openpgp/eddsa/eddsa.go | 91 + .../go-crypto}/openpgp/elgamal/elgamal.go | 8 +- .../go-crypto/openpgp/errors/errors.go | 145 + .../go-crypto/openpgp/forwarding.go | 163 + 
.../ProtonMail/go-crypto/openpgp/hash.go | 24 + .../openpgp/internal/algorithm/aead.go | 65 + .../openpgp/internal/algorithm/cipher.go | 97 + .../openpgp/internal/algorithm/hash.go | 143 + .../openpgp/internal/ecc/curve25519.go | 170 + .../internal/ecc/curve25519/curve25519.go | 122 + .../internal/ecc/curve25519/field/fe.go | 416 + .../internal/ecc/curve25519/field/fe_amd64.go | 13 + .../internal/ecc/curve25519/field/fe_amd64.s | 379 + .../ecc/curve25519/field/fe_amd64_noasm.go | 12 + .../internal/ecc/curve25519/field/fe_arm64.go | 16 + .../internal/ecc/curve25519/field/fe_arm64.s | 43 + .../ecc/curve25519/field/fe_arm64_noasm.go | 12 + .../ecc/curve25519/field/fe_generic.go | 264 + .../openpgp/internal/ecc/curve_info.go | 143 + .../go-crypto/openpgp/internal/ecc/curves.go | 48 + .../go-crypto/openpgp/internal/ecc/ed25519.go | 120 + .../go-crypto/openpgp/internal/ecc/ed448.go | 119 + .../go-crypto/openpgp/internal/ecc/generic.go | 149 + .../go-crypto/openpgp/internal/ecc/x448.go | 107 + .../openpgp/internal/encoding/encoding.go | 27 + .../openpgp/internal/encoding/mpi.go | 91 + .../openpgp/internal/encoding/oid.go | 88 + .../internal/encoding/short_byte_string.go | 50 + .../go-crypto/openpgp/key_generation.go | 458 + .../ProtonMail/go-crypto/openpgp/keys.go | 915 + .../go-crypto/openpgp/keys_test_data.go | 538 + .../go-crypto/openpgp/packet/aead_config.go | 67 + .../go-crypto/openpgp/packet/aead_crypter.go | 270 + .../openpgp/packet/aead_encrypted.go | 96 + .../go-crypto}/openpgp/packet/compressed.go | 48 +- .../go-crypto/openpgp/packet/config.go | 398 + .../go-crypto/openpgp/packet/config_v5.go | 7 + .../go-crypto/openpgp/packet/encrypted_key.go | 664 + .../go-crypto/openpgp/packet/forwarding.go | 36 + .../go-crypto}/openpgp/packet/literal.go | 10 +- .../go-crypto/openpgp/packet/marker.go | 33 + .../go-crypto/openpgp/packet/notation.go | 29 + .../go-crypto}/openpgp/packet/ocfb.go | 8 +- .../openpgp/packet/one_pass_signature.go | 157 + .../go-crypto}/openpgp/packet/opaque.go | 17 +- .../go-crypto/openpgp/packet/packet.go | 678 + .../openpgp/packet/packet_sequence.go | 222 + .../openpgp/packet/packet_unsupported.go | 24 + .../go-crypto/openpgp/packet/padding.go | 26 + .../go-crypto/openpgp/packet/private_key.go | 1282 + .../openpgp/packet/private_key_test_data.go | 12 + .../go-crypto/openpgp/packet/public_key.go | 1277 + .../openpgp/packet/public_key_test_data.go | 24 + .../go-crypto/openpgp/packet/reader.go | 209 + .../go-crypto/openpgp/packet/recipient.go | 15 + .../go-crypto/openpgp/packet/signature.go | 1530 + .../openpgp/packet/symmetric_key_encrypted.go | 319 + .../openpgp/packet/symmetrically_encrypted.go | 90 + .../packet/symmetrically_encrypted_aead.go | 157 + .../packet/symmetrically_encrypted_mdc.go} | 118 +- .../openpgp/packet/userattribute.go | 16 +- .../go-crypto}/openpgp/packet/userid.go | 7 + .../ProtonMail/go-crypto/openpgp/read.go | 619 + .../go-crypto/openpgp/read_write_test_data.go | 475 + .../ProtonMail/go-crypto/openpgp/s2k/s2k.go | 414 + .../go-crypto/openpgp/s2k/s2k_cache.go | 26 + .../go-crypto/openpgp/s2k/s2k_config.go | 129 + .../go-crypto/openpgp/symmetric/aead.go | 76 + .../go-crypto/openpgp/symmetric/hmac.go | 109 + .../ProtonMail/go-crypto/openpgp/write.go | 620 + .../go-crypto/openpgp/x25519/x25519.go | 221 + .../ProtonMail/go-crypto/openpgp/x448/x448.go | 229 + .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 574 + .../aws/aws-sdk-go-v2/service/kms/LICENSE.txt | 202 + .../aws-sdk-go-v2/service/kms/api_client.go | 911 + .../service/kms/api_op_CancelKeyDeletion.go 
| 191 + .../kms/api_op_ConnectCustomKeyStore.go | 245 + .../service/kms/api_op_CreateAlias.go | 244 + .../kms/api_op_CreateCustomKeyStore.go | 393 + .../service/kms/api_op_CreateGrant.go | 351 + .../service/kms/api_op_CreateKey.go | 599 + .../service/kms/api_op_Decrypt.go | 362 + .../service/kms/api_op_DeleteAlias.go | 194 + .../kms/api_op_DeleteCustomKeyStore.go | 211 + .../kms/api_op_DeleteImportedKeyMaterial.go | 193 + .../service/kms/api_op_DeriveSharedSecret.go | 364 + .../kms/api_op_DescribeCustomKeyStores.go | 338 + .../service/kms/api_op_DescribeKey.go | 257 + .../service/kms/api_op_DisableKey.go | 185 + .../service/kms/api_op_DisableKeyRotation.go | 215 + .../kms/api_op_DisconnectCustomKeyStore.go | 200 + .../service/kms/api_op_EnableKey.go | 182 + .../service/kms/api_op_EnableKeyRotation.go | 254 + .../service/kms/api_op_Encrypt.go | 322 + .../service/kms/api_op_GenerateDataKey.go | 377 + .../service/kms/api_op_GenerateDataKeyPair.go | 375 + ..._op_GenerateDataKeyPairWithoutPlaintext.go | 302 + .../api_op_GenerateDataKeyWithoutPlaintext.go | 302 + .../service/kms/api_op_GenerateMac.go | 247 + .../service/kms/api_op_GenerateRandom.go | 232 + .../service/kms/api_op_GetKeyPolicy.go | 189 + .../kms/api_op_GetKeyRotationStatus.go | 258 + .../kms/api_op_GetParametersForImport.go | 306 + .../service/kms/api_op_GetPublicKey.go | 297 + .../service/kms/api_op_ImportKeyMaterial.go | 321 + .../service/kms/api_op_ListAliases.go | 326 + .../service/kms/api_op_ListGrants.go | 335 + .../service/kms/api_op_ListKeyPolicies.go | 312 + .../service/kms/api_op_ListKeyRotations.go | 318 + .../service/kms/api_op_ListKeys.go | 291 + .../service/kms/api_op_ListResourceTags.go | 326 + .../service/kms/api_op_ListRetirableGrants.go | 332 + .../service/kms/api_op_PutKeyPolicy.go | 244 + .../service/kms/api_op_ReEncrypt.go | 401 + .../service/kms/api_op_ReplicateKey.go | 398 + .../service/kms/api_op_RetireGrant.go | 213 + .../service/kms/api_op_RevokeGrant.go | 215 + .../service/kms/api_op_RotateKeyOnDemand.go | 237 + .../service/kms/api_op_ScheduleKeyDeletion.go | 268 + .../aws-sdk-go-v2/service/kms/api_op_Sign.go | 325 + .../service/kms/api_op_TagResource.go | 226 + .../service/kms/api_op_UntagResource.go | 209 + .../service/kms/api_op_UpdateAlias.go | 240 + .../kms/api_op_UpdateCustomKeyStore.go | 350 + .../kms/api_op_UpdateKeyDescription.go | 193 + .../service/kms/api_op_UpdatePrimaryRegion.go | 248 + .../service/kms/api_op_Verify.go | 313 + .../service/kms/api_op_VerifyMac.go | 246 + .../aws/aws-sdk-go-v2/service/kms/auth.go | 313 + .../service/kms/deserializers.go | 13880 +++++++ .../aws/aws-sdk-go-v2/service/kms/doc.go | 97 + .../aws-sdk-go-v2/service/kms/endpoints.go | 537 + .../aws-sdk-go-v2/service/kms/generated.json | 85 + .../service/kms/go_module_metadata.go | 6 + .../kms/internal/endpoints/endpoints.go | 978 + .../aws/aws-sdk-go-v2/service/kms/options.go | 232 + .../aws-sdk-go-v2/service/kms/serializers.go | 4827 +++ .../aws-sdk-go-v2/service/kms/types/enums.go | 635 + .../aws-sdk-go-v2/service/kms/types/errors.go | 1474 + .../aws-sdk-go-v2/service/kms/types/types.go | 691 + .../aws-sdk-go-v2/service/kms/validators.go | 2050 ++ vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 - .../aws/aws-sdk-go/aws/awserr/error.go | 164 - .../aws/aws-sdk-go/aws/awserr/types.go | 221 - .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 - .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 - .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 - .../aws/aws-sdk-go/aws/awsutil/prettify.go | 123 - 
.../aws-sdk-go/aws/awsutil/string_value.go | 90 - .../aws/aws-sdk-go/aws/client/client.go | 94 - .../aws-sdk-go/aws/client/default_retryer.go | 177 - .../aws/aws-sdk-go/aws/client/logger.go | 206 - .../aws/client/metadata/client_info.go | 15 - .../aws-sdk-go/aws/client/no_op_retryer.go | 28 - .../github.com/aws/aws-sdk-go/aws/config.go | 628 - .../aws/aws-sdk-go/aws/context_1_5.go | 38 - .../aws/aws-sdk-go/aws/context_1_9.go | 12 - .../aws-sdk-go/aws/context_background_1_5.go | 23 - .../aws-sdk-go/aws/context_background_1_7.go | 21 - .../aws/aws-sdk-go/aws/context_sleep.go | 24 - .../aws/aws-sdk-go/aws/convert_types.go | 918 - .../aws-sdk-go/aws/corehandlers/handlers.go | 232 - .../aws/corehandlers/param_validator.go | 17 - .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 - .../aws/credentials/chain_provider.go | 100 - .../credentials/context_background_go1.5.go | 23 - .../credentials/context_background_go1.7.go | 21 - .../aws/credentials/context_go1.5.go | 40 - .../aws/credentials/context_go1.9.go | 14 - .../aws-sdk-go/aws/credentials/credentials.go | 383 - .../ec2rolecreds/ec2_role_provider.go | 188 - .../aws/credentials/endpointcreds/provider.go | 210 - .../aws/credentials/env_provider.go | 74 - .../aws-sdk-go/aws/credentials/example.ini | 12 - .../aws/credentials/processcreds/provider.go | 426 - .../shared_credentials_provider.go | 151 - .../aws/credentials/ssocreds/doc.go | 60 - .../aws-sdk-go/aws/credentials/ssocreds/os.go | 10 - .../aws/credentials/ssocreds/os_windows.go | 7 - .../aws/credentials/ssocreds/provider.go | 180 - .../aws/credentials/static_provider.go | 57 - .../stscreds/assume_role_provider.go | 367 - .../stscreds/web_identity_provider.go | 154 - .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 - .../aws/aws-sdk-go/aws/csm/enable.go | 89 - .../aws/aws-sdk-go/aws/csm/metric.go | 109 - .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 - .../aws-sdk-go/aws/csm/metric_exception.go | 26 - .../aws/aws-sdk-go/aws/csm/reporter.go | 264 - .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 - .../aws-sdk-go/aws/defaults/shared_config.go | 27 - vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 - .../aws/aws-sdk-go/aws/ec2metadata/api.go | 250 - .../aws/aws-sdk-go/aws/ec2metadata/service.go | 245 - .../aws/ec2metadata/token_provider.go | 93 - .../aws/aws-sdk-go/aws/endpoints/decode.go | 193 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 28166 --------------- .../aws/endpoints/dep_service_ids.go | 141 - .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 - .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 706 - .../aws/endpoints/legacy_regions.go | 24 - .../aws/aws-sdk-go/aws/endpoints/v3model.go | 594 - .../aws/endpoints/v3model_codegen.go | 412 - .../github.com/aws/aws-sdk-go/aws/errors.go | 13 - .../aws/aws-sdk-go/aws/jsonvalue.go | 12 - .../github.com/aws/aws-sdk-go/aws/logger.go | 121 - .../aws/request/connection_reset_error.go | 19 - .../aws/aws-sdk-go/aws/request/handlers.go | 343 - .../aws-sdk-go/aws/request/http_request.go | 24 - .../aws-sdk-go/aws/request/offset_reader.go | 65 - .../aws/aws-sdk-go/aws/request/request.go | 713 - .../aws/aws-sdk-go/aws/request/request_1_7.go | 40 - .../aws/aws-sdk-go/aws/request/request_1_8.go | 37 - .../aws-sdk-go/aws/request/request_context.go | 15 - .../aws/request/request_context_1_6.go | 15 - .../aws/request/request_pagination.go | 266 - .../aws/aws-sdk-go/aws/request/retryer.go | 309 - .../aws/request/timeout_read_closer.go | 94 - .../aws/aws-sdk-go/aws/request/validation.go | 286 - .../aws/aws-sdk-go/aws/request/waiter.go | 295 - 
.../aws/aws-sdk-go/aws/session/credentials.go | 290 - .../aws/session/custom_transport.go | 28 - .../aws/session/custom_transport_go1.12.go | 27 - .../aws/session/custom_transport_go1.5.go | 23 - .../aws/session/custom_transport_go1.6.go | 24 - .../aws/aws-sdk-go/aws/session/doc.go | 367 - .../aws/aws-sdk-go/aws/session/env_config.go | 471 - .../aws/aws-sdk-go/aws/session/session.go | 992 - .../aws-sdk-go/aws/session/shared_config.go | 729 - .../aws-sdk-go/aws/signer/v4/header_rules.go | 81 - .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 - .../aws/signer/v4/request_context_go1.5.go | 14 - .../aws/signer/v4/request_context_go1.7.go | 14 - .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 - .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 25 - .../aws/aws-sdk-go/aws/signer/v4/v4.go | 854 - vendor/github.com/aws/aws-sdk-go/aws/types.go | 264 - vendor/github.com/aws/aws-sdk-go/aws/url.go | 13 - .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 30 - .../github.com/aws/aws-sdk-go/aws/version.go | 8 - .../internal/context/background_go1.5.go | 41 - .../aws/aws-sdk-go/internal/ini/ast.go | 120 - .../aws-sdk-go/internal/ini/comma_token.go | 11 - .../aws-sdk-go/internal/ini/comment_token.go | 35 - .../aws/aws-sdk-go/internal/ini/doc.go | 42 - .../aws-sdk-go/internal/ini/empty_token.go | 4 - .../aws/aws-sdk-go/internal/ini/expression.go | 24 - .../aws/aws-sdk-go/internal/ini/fuzz.go | 18 - .../aws/aws-sdk-go/internal/ini/ini.go | 51 - .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 - .../aws/aws-sdk-go/internal/ini/ini_parser.go | 350 - .../aws-sdk-go/internal/ini/literal_tokens.go | 340 - .../aws-sdk-go/internal/ini/newline_token.go | 30 - .../aws-sdk-go/internal/ini/number_helper.go | 152 - .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 - .../aws-sdk-go/internal/ini/parse_error.go | 43 - .../aws-sdk-go/internal/ini/parse_stack.go | 60 - .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 - .../aws/aws-sdk-go/internal/ini/skipper.go | 45 - .../aws/aws-sdk-go/internal/ini/statement.go | 35 - .../aws/aws-sdk-go/internal/ini/value_util.go | 284 - .../aws/aws-sdk-go/internal/ini/visitor.go | 169 - .../aws/aws-sdk-go/internal/ini/walker.go | 25 - .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 - .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 - .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 11 - .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 13 - .../aws/aws-sdk-go/internal/sdkmath/floor.go | 16 - .../internal/sdkmath/floor_go1.9.go | 57 - .../internal/sdkrand/locked_source.go | 29 - .../aws/aws-sdk-go/internal/sdkrand/read.go | 12 - .../aws-sdk-go/internal/sdkrand/read_1_5.go | 25 - .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 - .../internal/shareddefaults/ecs_container.go | 12 - .../internal/shareddefaults/shared_config.go | 40 - .../aws-sdk-go/internal/strings/strings.go | 11 - .../sync/singleflight/singleflight.go | 120 - .../aws/aws-sdk-go/private/protocol/host.go | 104 - .../private/protocol/host_prefix.go | 54 - .../private/protocol/idempotency.go | 75 - .../private/protocol/json/jsonutil/build.go | 298 - .../protocol/json/jsonutil/unmarshal.go | 304 - .../private/protocol/jsonrpc/jsonrpc.go | 87 - .../protocol/jsonrpc/unmarshal_error.go | 107 - .../aws-sdk-go/private/protocol/jsonvalue.go | 76 - .../aws-sdk-go/private/protocol/payload.go | 81 - .../aws-sdk-go/private/protocol/protocol.go | 49 - .../private/protocol/query/build.go | 36 - .../protocol/query/queryutil/queryutil.go | 246 - .../private/protocol/query/unmarshal.go | 39 - .../private/protocol/query/unmarshal_error.go | 69 - 
.../aws-sdk-go/private/protocol/rest/build.go | 310 - .../private/protocol/rest/payload.go | 54 - .../private/protocol/rest/unmarshal.go | 257 - .../private/protocol/restjson/restjson.go | 59 - .../protocol/restjson/unmarshal_error.go | 134 - .../aws-sdk-go/private/protocol/timestamp.go | 134 - .../aws-sdk-go/private/protocol/unmarshal.go | 27 - .../private/protocol/unmarshal_error.go | 65 - .../private/protocol/xml/xmlutil/build.go | 317 - .../private/protocol/xml/xmlutil/sort.go | 32 - .../private/protocol/xml/xmlutil/unmarshal.go | 299 - .../protocol/xml/xmlutil/xml_to_struct.go | 173 - .../aws/aws-sdk-go/service/kms/api.go | 18095 ---------- .../aws/aws-sdk-go/service/kms/doc.go | 104 - .../aws/aws-sdk-go/service/kms/errors.go | 366 - .../service/kms/kmsiface/interface.go | 268 - .../aws/aws-sdk-go/service/kms/service.go | 104 - .../aws/aws-sdk-go/service/sso/api.go | 1354 - .../aws/aws-sdk-go/service/sso/doc.go | 44 - .../aws/aws-sdk-go/service/sso/errors.go | 44 - .../aws/aws-sdk-go/service/sso/service.go | 105 - .../service/sso/ssoiface/interface.go | 86 - .../aws/aws-sdk-go/service/sts/api.go | 3437 -- .../aws-sdk-go/service/sts/customizations.go | 11 - .../aws/aws-sdk-go/service/sts/doc.go | 32 - .../aws/aws-sdk-go/service/sts/errors.go | 84 - .../aws/aws-sdk-go/service/sts/service.go | 99 - .../service/sts/stsiface/interface.go | 96 - vendor/github.com/blang/semver/.travis.yml | 21 + vendor/github.com/blang/semver/LICENSE | 22 + vendor/github.com/blang/semver/README.md | 194 + vendor/github.com/blang/semver/json.go | 23 + vendor/github.com/blang/semver/package.json | 17 + vendor/github.com/blang/semver/range.go | 416 + vendor/github.com/blang/semver/semver.go | 418 + vendor/github.com/blang/semver/sort.go | 28 + vendor/github.com/blang/semver/sql.go | 30 + .../github.com/cenkalti/backoff/v4/.gitignore | 25 + .../cenkalti/backoff/v4}/LICENSE | 4 +- .../github.com/cenkalti/backoff/v4/README.md | 30 + .../github.com/cenkalti/backoff/v4/backoff.go | 66 + .../github.com/cenkalti/backoff/v4/context.go | 62 + .../cenkalti/backoff/v4/exponential.go | 216 + .../github.com/cenkalti/backoff/v4/retry.go | 146 + .../github.com/cenkalti/backoff/v4/ticker.go | 97 + .../github.com/cenkalti/backoff/v4/timer.go | 35 + .../github.com/cenkalti/backoff/v4/tries.go | 38 + vendor/github.com/cloudflare/circl/LICENSE | 57 + .../cloudflare/circl/dh/x25519/curve.go | 96 + .../cloudflare/circl/dh/x25519/curve_amd64.go | 30 + .../cloudflare/circl/dh/x25519/curve_amd64.h | 111 + .../cloudflare/circl/dh/x25519/curve_amd64.s | 157 + .../circl/dh/x25519/curve_generic.go | 85 + .../cloudflare/circl/dh/x25519/curve_noasm.go | 11 + .../cloudflare/circl/dh/x25519/doc.go | 19 + .../cloudflare/circl/dh/x25519/key.go | 47 + .../cloudflare/circl/dh/x25519/table.go | 268 + .../cloudflare/circl/dh/x448/curve.go | 104 + .../cloudflare/circl/dh/x448/curve_amd64.go | 30 + .../cloudflare/circl/dh/x448/curve_amd64.h | 111 + .../cloudflare/circl/dh/x448/curve_amd64.s | 194 + .../cloudflare/circl/dh/x448/curve_generic.go | 100 + .../cloudflare/circl/dh/x448/curve_noasm.go | 11 + .../cloudflare/circl/dh/x448/doc.go | 19 + .../cloudflare/circl/dh/x448/key.go | 46 + .../cloudflare/circl/dh/x448/table.go | 460 + .../circl/ecc/goldilocks/constants.go | 71 + .../cloudflare/circl/ecc/goldilocks/curve.go | 80 + .../circl/ecc/goldilocks/isogeny.go | 52 + .../cloudflare/circl/ecc/goldilocks/point.go | 171 + .../cloudflare/circl/ecc/goldilocks/scalar.go | 203 + .../cloudflare/circl/ecc/goldilocks/twist.go | 138 + 
.../circl/ecc/goldilocks/twistPoint.go | 135 + .../circl/ecc/goldilocks/twistTables.go | 216 + .../circl/ecc/goldilocks/twist_basemult.go | 62 + .../cloudflare/circl/internal/conv/conv.go | 140 + .../cloudflare/circl/internal/sha3/doc.go | 62 + .../cloudflare/circl/internal/sha3/hashes.go | 69 + .../cloudflare/circl/internal/sha3/keccakf.go | 391 + .../cloudflare/circl/internal/sha3/rc.go | 29 + .../cloudflare/circl/internal/sha3/sha3.go | 200 + .../circl/internal/sha3/sha3_s390x.s | 33 + .../cloudflare/circl/internal/sha3/shake.go | 119 + .../cloudflare/circl/internal/sha3/xor.go | 15 + .../circl/internal/sha3/xor_generic.go | 33 + .../circl/internal/sha3/xor_unaligned.go | 61 + .../cloudflare/circl/math/fp25519/fp.go | 205 + .../cloudflare/circl/math/fp25519/fp_amd64.go | 45 + .../cloudflare/circl/math/fp25519/fp_amd64.h | 351 + .../cloudflare/circl/math/fp25519/fp_amd64.s | 112 + .../circl/math/fp25519/fp_generic.go | 317 + .../cloudflare/circl/math/fp25519/fp_noasm.go | 13 + .../cloudflare/circl/math/fp448/fp.go | 164 + .../cloudflare/circl/math/fp448/fp_amd64.go | 43 + .../cloudflare/circl/math/fp448/fp_amd64.h | 591 + .../cloudflare/circl/math/fp448/fp_amd64.s | 75 + .../cloudflare/circl/math/fp448/fp_generic.go | 339 + .../cloudflare/circl/math/fp448/fp_noasm.go | 12 + .../cloudflare/circl/math/fp448/fuzzer.go | 75 + .../cloudflare/circl/math/mlsbset/mlsbset.go | 122 + .../cloudflare/circl/math/mlsbset/power.go | 64 + .../cloudflare/circl/math/primes.go | 34 + .../github.com/cloudflare/circl/math/wnaf.go | 84 + .../cloudflare/circl/sign/ed25519/ed25519.go | 453 + .../cloudflare/circl/sign/ed25519/modular.go | 175 + .../cloudflare/circl/sign/ed25519/mult.go | 180 + .../cloudflare/circl/sign/ed25519/point.go | 195 + .../cloudflare/circl/sign/ed25519/pubkey.go | 9 + .../circl/sign/ed25519/pubkey112.go | 7 + .../cloudflare/circl/sign/ed25519/signapi.go | 87 + .../cloudflare/circl/sign/ed25519/tables.go | 213 + .../cloudflare/circl/sign/ed448/ed448.go | 411 + .../cloudflare/circl/sign/ed448/signapi.go | 87 + .../github.com/cloudflare/circl/sign/sign.go | 110 + .../cpuguy83/go-md2man/v2/LICENSE.md | 21 + .../cpuguy83/go-md2man/v2/md2man/debug.go | 62 + .../cpuguy83/go-md2man/v2/md2man/md2man.go | 23 + .../cpuguy83/go-md2man/v2/md2man/roff.go | 408 + .../github.com/dgrijalva/jwt-go/.travis.yml | 13 - .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - vendor/github.com/dgrijalva/jwt-go/README.md | 100 - vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - vendor/github.com/dgrijalva/jwt-go/errors.go | 59 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - vendor/github.com/dgrijalva/jwt-go/parser.go | 148 - .../dgrijalva/jwt-go/signing_method.go | 35 - vendor/github.com/dgrijalva/jwt-go/token.go | 108 - .../github.com/dimchansky/utfbom/.gitignore | 37 - .../github.com/dimchansky/utfbom/.travis.yml | 18 - vendor/github.com/dimchansky/utfbom/README.md | 66 - vendor/github.com/dimchansky/utfbom/utfbom.go | 192 - vendor/github.com/fatih/color/.travis.yml | 5 - vendor/github.com/fatih/color/Gopkg.lock | 27 - vendor/github.com/fatih/color/Gopkg.toml | 30 - vendor/github.com/fatih/color/README.md | 37 +- vendor/github.com/fatih/color/color.go | 116 +- .../github.com/fatih/color/color_windows.go | 19 + vendor/github.com/fatih/color/doc.go | 139 +- .../github.com/felixge/httpsnoop/.gitignore | 0 .../github.com/felixge/httpsnoop/LICENSE.txt | 19 + vendor/github.com/felixge/httpsnoop/Makefile | 10 + vendor/github.com/felixge/httpsnoop/README.md | 95 + .../felixge/httpsnoop/capture_metrics.go | 86 + 
vendor/github.com/felixge/httpsnoop/docs.go | 10 + .../httpsnoop/wrap_generated_gteq_1.8.go | 436 + .../httpsnoop/wrap_generated_lt_1.8.go | 278 + .../getsops/gopgagent}/LICENSE | 4 +- .../getsops}/gopgagent/gpgagent.go | 0 vendor/github.com/getsops/sops/v3/.gitignore | 5 + .../getsops/sops/v3/.goreleaser.yaml | 370 + vendor/github.com/getsops/sops/v3/.sops.yaml | 4 + .../github.com/getsops/sops/v3/CHANGELOG.rst | 486 + .../getsops/sops/v3/CODE_OF_CONDUCT.md | 5 + .../getsops/sops/v3/CONTRIBUTING.md | 38 + vendor/github.com/getsops/sops/v3/DCO | 36 + .../getsops/sops/v3}/LICENSE | 0 vendor/github.com/getsops/sops/v3/Makefile | 122 + vendor/github.com/getsops/sops/v3/README.rst | 1894 + .../getsops/sops/v3}/aes/cipher.go | 13 +- .../getsops/sops/v3/age/keysource.go | 319 + .../getsops/sops/v3}/audit/audit.go | 8 +- .../getsops/sops/v3}/audit/schema.sql | 0 .../getsops/sops/v3/azkv/keysource.go | 235 + .../getsops/sops/v3/cmd/sops/codes/codes.go | 33 + .../getsops/sops/v3/cmd/sops/common/common.go | 463 + .../sops/v3/cmd/sops/formats/formats.go | 78 + .../getsops/sops/v3/config/config.go | 476 + .../getsops/sops/v3}/decrypt/decrypt.go | 55 +- vendor/github.com/getsops/sops/v3/example.ini | 31 + .../github.com/getsops/sops/v3/example.json | 43 + vendor/github.com/getsops/sops/v3/example.txt | 24 + .../github.com/getsops/sops/v3/example.yaml | 69 + .../getsops/sops/v3/gcpkms/keysource.go | 255 + .../getsops/sops/v3/hcvault/keysource.go | 369 + .../getsops/sops/v3}/keys/keys.go | 1 + .../getsops/sops/v3}/keyservice/client.go | 8 +- .../getsops/sops/v3}/keyservice/keyservice.go | 30 +- .../sops/v3/keyservice/keyservice.pb.go | 1119 + .../sops/v3}/keyservice/keyservice.proto | 13 + .../getsops/sops/v3}/keyservice/server.go | 106 +- .../getsops/sops/v3/kms/keysource.go | 402 + .../getsops/sops/v3}/logging/logging.go | 0 .../getsops/sops/v3/pgp/keysource.go | 641 + .../v3}/pgp/sops_functional_tests_key.asc | 0 .../github.com/getsops/sops/v3/publish/gcs.go | 45 + .../getsops/sops/v3/publish/publish.go | 19 + .../github.com/getsops/sops/v3/publish/s3.go | 51 + .../getsops/sops/v3/publish/vault.go | 102 + .../getsops/sops/v3/rust-toolchain.toml | 3 + .../getsops/sops/v3}/shamir/LICENSE | 0 .../getsops/sops/v3}/shamir/README.md | 3 +- .../getsops/sops/v3}/shamir/shamir.go | 2 +- .../getsops/sops/v3}/shamir/tables.go | 0 .../getsops/sops/v3}/sops.go | 430 +- .../getsops/sops/v3}/stores/dotenv/store.go | 96 +- .../getsops/sops/v3}/stores/flatten.go | 91 + .../getsops/sops/v3/stores/ini/store.go | 281 + .../getsops/sops/v3}/stores/json/store.go | 76 +- .../getsops/sops/v3}/stores/stores.go | 147 +- .../getsops/sops/v3/stores/yaml/store.go | 424 + .../getsops/sops/v3}/usererrors.go | 10 +- .../getsops/sops/v3/version/version.go | 229 + .../github.com/go-jose/go-jose/v4/.gitignore | 2 + .../go-jose/go-jose/v4/.golangci.yml | 53 + .../github.com/go-jose/go-jose/v4/.travis.yml | 33 + .../go-jose/go-jose/v4/CHANGELOG.md | 96 + .../go-jose/go-jose/v4/CONTRIBUTING.md | 15 + vendor/github.com/go-jose/go-jose/v4/LICENSE | 202 + .../github.com/go-jose/go-jose/v4/README.md | 114 + .../github.com/go-jose/go-jose/v4/SECURITY.md | 13 + .../go-jose/go-jose/v4/asymmetric.go | 595 + .../go-jose/go-jose/v4/cipher/cbc_hmac.go | 196 + .../go-jose/go-jose/v4/cipher/concat_kdf.go | 75 + .../go-jose/go-jose/v4/cipher/ecdh_es.go | 86 + .../go-jose/go-jose/v4/cipher/key_wrap.go | 109 + .../github.com/go-jose/go-jose/v4/crypter.go | 599 + vendor/github.com/go-jose/go-jose/v4/doc.go | 25 + 
.../github.com/go-jose/go-jose/v4/encoding.go | 228 + .../go-jose/go-jose/v4/json/LICENSE | 27 + .../go-jose/go-jose/v4/json/README.md | 13 + .../go-jose/go-jose/v4/json/decode.go | 1216 + .../go-jose/go-jose/v4/json/encode.go | 1197 + .../go-jose/go-jose/v4/json/indent.go | 141 + .../go-jose/go-jose/v4/json/scanner.go | 623 + .../go-jose/go-jose/v4/json/stream.go | 484 + .../go-jose/go-jose/v4/json/tags.go | 44 + vendor/github.com/go-jose/go-jose/v4/jwe.go | 381 + vendor/github.com/go-jose/go-jose/v4/jwk.go | 823 + vendor/github.com/go-jose/go-jose/v4/jws.go | 421 + .../go-jose/go-jose/v4/jwt/builder.go | 315 + .../go-jose/go-jose/v4/jwt/claims.go | 130 + .../github.com/go-jose/go-jose/v4/jwt/doc.go | 20 + .../go-jose/go-jose/v4/jwt/errors.go | 53 + .../github.com/go-jose/go-jose/v4/jwt/jwt.go | 198 + .../go-jose/go-jose/v4/jwt/validation.go | 127 + .../github.com/go-jose/go-jose/v4/opaque.go | 147 + .../github.com/go-jose/go-jose/v4/shared.go | 531 + .../github.com/go-jose/go-jose/v4/signing.go | 505 + .../go-jose/go-jose/v4/symmetric.go | 517 + vendor/github.com/go-logr/logr/.golangci.yaml | 26 + vendor/github.com/go-logr/logr/CHANGELOG.md | 6 + .../github.com/go-logr/logr/CONTRIBUTING.md | 17 + .../utfbom => go-logr/logr}/LICENSE | 0 vendor/github.com/go-logr/logr/README.md | 407 + vendor/github.com/go-logr/logr/SECURITY.md | 18 + vendor/github.com/go-logr/logr/context.go | 33 + .../github.com/go-logr/logr/context_noslog.go | 49 + .../github.com/go-logr/logr/context_slog.go | 83 + vendor/github.com/go-logr/logr/discard.go | 24 + vendor/github.com/go-logr/logr/funcr/funcr.go | 914 + .../github.com/go-logr/logr/funcr/slogsink.go | 105 + vendor/github.com/go-logr/logr/logr.go | 520 + vendor/github.com/go-logr/logr/sloghandler.go | 192 + vendor/github.com/go-logr/logr/slogr.go | 100 + vendor/github.com/go-logr/logr/slogsink.go | 120 + .../autorest/adal => go-logr/stdr}/LICENSE | 14 +- vendor/github.com/go-logr/stdr/README.md | 6 + vendor/github.com/go-logr/stdr/stdr.go | 170 + .../jwt-go => golang-jwt/jwt/v5}/.gitignore | 2 +- .../jwt-go => golang-jwt/jwt/v5}/LICENSE | 1 + .../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 195 + vendor/github.com/golang-jwt/jwt/v5/README.md | 167 + .../github.com/golang-jwt/jwt/v5/SECURITY.md | 19 + .../jwt/v5}/VERSION_HISTORY.md | 29 +- vendor/github.com/golang-jwt/jwt/v5/claims.go | 16 + .../jwt-go => golang-jwt/jwt/v5}/doc.go | 0 .../jwt-go => golang-jwt/jwt/v5}/ecdsa.go | 54 +- .../jwt/v5}/ecdsa_utils.go | 12 +- .../github.com/golang-jwt/jwt/v5/ed25519.go | 79 + .../golang-jwt/jwt/v5/ed25519_utils.go | 64 + vendor/github.com/golang-jwt/jwt/v5/errors.go | 49 + .../golang-jwt/jwt/v5/errors_go1_20.go | 47 + .../golang-jwt/jwt/v5/errors_go_other.go | 78 + .../jwt-go => golang-jwt/jwt/v5}/hmac.go | 41 +- .../golang-jwt/jwt/v5/map_claims.go | 109 + .../jwt-go => golang-jwt/jwt/v5}/none.go | 20 +- vendor/github.com/golang-jwt/jwt/v5/parser.go | 238 + .../golang-jwt/jwt/v5/parser_option.go | 128 + .../golang-jwt/jwt/v5/registered_claims.go | 63 + .../jwt-go => golang-jwt/jwt/v5}/rsa.go | 28 +- .../jwt-go => golang-jwt/jwt/v5}/rsa_pss.go | 67 +- .../jwt-go => golang-jwt/jwt/v5}/rsa_utils.go | 20 +- .../golang-jwt/jwt/v5/signing_method.go | 49 + .../golang-jwt/jwt/v5/staticcheck.conf | 1 + vendor/github.com/golang-jwt/jwt/v5/token.go | 100 + .../golang-jwt/jwt/v5/token_option.go | 5 + vendor/github.com/golang-jwt/jwt/v5/types.go | 149 + .../github.com/golang-jwt/jwt/v5/validator.go | 316 + vendor/github.com/golang/glog/README | 44 - 
vendor/github.com/golang/glog/README.md | 36 + vendor/github.com/golang/glog/glog.go | 1367 +- vendor/github.com/golang/glog/glog_file.go | 324 +- .../github.com/golang/glog/glog_file_linux.go | 39 + .../golang/glog/glog_file_nonwindows.go | 12 + .../github.com/golang/glog/glog_file_other.go | 30 + .../github.com/golang/glog/glog_file_posix.go | 53 + .../golang/glog/glog_file_windows.go | 30 + vendor/github.com/golang/glog/glog_flags.go | 398 + .../golang/glog/internal/logsink/logsink.go | 393 + .../glog/internal/logsink/logsink_fatal.go | 35 + .../glog/internal/stackdump/stackdump.go | 127 + .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 771 +- .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 317 + .../golang/protobuf/proto/table_marshal.go | 2776 -- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 -- .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../github.com/golang/protobuf/ptypes/any.go | 141 - .../golang/protobuf/ptypes/any/any.pb.go | 200 - .../golang/protobuf/ptypes/any/any.proto | 154 - .../golang/protobuf/ptypes/duration.go | 102 - .../protobuf/ptypes/duration/duration.pb.go | 161 - .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/timestamp.go | 132 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 179 - .../protobuf/ptypes/timestamp/timestamp.proto | 135 - vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 671 + vendor/github.com/google/go-cmp/cmp/export.go | 31 + .../go-cmp/cmp/internal/diff/debug_disable.go | 18 + .../go-cmp/cmp/internal/diff/debug_enable.go | 123 + .../google/go-cmp/cmp/internal/diff/diff.go | 402 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../go-cmp/cmp/internal/function/func.go | 99 + .../google/go-cmp/cmp/internal/value/name.go | 164 + .../go-cmp/cmp/internal/value/pointer.go | 34 + .../google/go-cmp/cmp/internal/value/sort.go | 106 + .../github.com/google/go-cmp/cmp/options.go | 554 + vendor/github.com/google/go-cmp/cmp/path.go | 390 + vendor/github.com/google/go-cmp/cmp/report.go | 54 + .../google/go-cmp/cmp/report_compare.go | 433 + .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 414 + .../google/go-cmp/cmp/report_slices.go | 614 + .../google/go-cmp/cmp/report_text.go | 432 + .../google/go-cmp/cmp/report_value.go | 121 + vendor/github.com/google/s2a-go/.gitignore | 6 + .../google/s2a-go/CODE_OF_CONDUCT.md | 93 + .../github.com/google/s2a-go/CONTRIBUTING.md | 29 + vendor/github.com/google/s2a-go/LICENSE.md | 202 + vendor/github.com/google/s2a-go/README.md | 14 + 
.../google/s2a-go/fallback/s2a_fallback.go | 167 + .../s2a-go/internal/authinfo/authinfo.go | 119 + .../s2a-go/internal/handshaker/handshaker.go | 438 + .../internal/handshaker/service/service.go | 66 + .../proto/common_go_proto/common.pb.go | 388 + .../s2a_context_go_proto/s2a_context.pb.go | 267 + .../internal/proto/s2a_go_proto/s2a.pb.go | 1377 + .../proto/s2a_go_proto/s2a_grpc.pb.go | 174 + .../proto/v2/common_go_proto/common.pb.go | 549 + .../v2/s2a_context_go_proto/s2a_context.pb.go | 249 + .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 2512 ++ .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 160 + .../internal/aeadcrypter/aeadcrypter.go | 34 + .../record/internal/aeadcrypter/aesgcm.go | 70 + .../record/internal/aeadcrypter/chachapoly.go | 67 + .../record/internal/aeadcrypter/common.go | 92 + .../record/internal/halfconn/ciphersuite.go | 98 + .../record/internal/halfconn/counter.go | 60 + .../record/internal/halfconn/expander.go | 59 + .../record/internal/halfconn/halfconn.go | 193 + .../google/s2a-go/internal/record/record.go | 729 + .../s2a-go/internal/record/ticketsender.go | 178 + .../internal/tokenmanager/tokenmanager.go | 79 + .../google/s2a-go/internal/v2/README.md | 1 + .../internal/v2/certverifier/certverifier.go | 122 + .../internal/v2/remotesigner/remotesigner.go | 186 + .../google/s2a-go/internal/v2/s2av2.go | 379 + .../v2/tlsconfigstore/tlsconfigstore.go | 403 + .../github.com/google/s2a-go/retry/retry.go | 144 + vendor/github.com/google/s2a-go/s2a.go | 442 + .../github.com/google/s2a-go/s2a_options.go | 235 + vendor/github.com/google/s2a-go/s2a_utils.go | 79 + .../google/s2a-go/stream/s2a_stream.go | 34 + .../enterprise-certificate-proxy/LICENSE | 202 + .../client/client.go | 219 + .../client/util/util.go | 100 + .../gax-go/v2/.release-please-manifest.json | 3 + .../googleapis/gax-go/v2/CHANGES.md | 149 + .../googleapis/gax-go/v2/apierror/apierror.go | 363 + .../v2/apierror/internal/proto/README.md | 30 + .../internal/proto/custom_error.pb.go | 256 + .../internal/proto/custom_error.proto | 50 + .../v2/apierror/internal/proto/error.pb.go | 280 + .../v2/apierror/internal/proto/error.proto | 46 + .../googleapis/gax-go/v2/call_option.go | 122 +- .../googleapis/gax-go/v2/callctx/callctx.go | 100 + .../googleapis/gax-go/v2/content_type.go | 112 + vendor/github.com/googleapis/gax-go/v2/gax.go | 4 +- .../github.com/googleapis/gax-go/v2/header.go | 151 +- .../gax-go/v2/internal/version.go} | 14 +- .../github.com/googleapis/gax-go/v2/invoke.go | 25 +- .../googleapis/gax-go/v2/iterator/iterator.go | 63 + .../googleapis/gax-go/v2/proto_json_stream.go | 127 + .../gax-go/v2/release-please-config.json | 10 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 178 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 58 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 150 + .../hashicorp/go-multierror/append.go | 43 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 121 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + .../hashicorp/go-retryablehttp/.gitignore | 4 + 
.../hashicorp/go-retryablehttp/.go-version | 1 + .../hashicorp/go-retryablehttp/CHANGELOG.md | 33 + .../hashicorp/go-retryablehttp/CODEOWNERS | 1 + .../hashicorp/go-retryablehttp/LICENSE | 365 + .../hashicorp/go-retryablehttp/Makefile | 11 + .../hashicorp/go-retryablehttp/README.md | 62 + .../go-retryablehttp/cert_error_go119.go | 14 + .../go-retryablehttp/cert_error_go120.go | 14 + .../hashicorp/go-retryablehttp/client.go | 919 + .../go-retryablehttp/roundtripper.go | 55 + .../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 + .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 44 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../hashicorp/go-rootcerts/rootcerts.go | 123 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + .../go-secure-stdlib/parseutil/LICENSE | 365 + .../go-secure-stdlib/parseutil/parsepath.go | 68 + .../go-secure-stdlib/parseutil/parseutil.go | 505 + .../go-secure-stdlib/strutil/LICENSE | 363 + .../go-secure-stdlib/strutil/strutil.go | 510 + .../hashicorp/go-sockaddr/.gitignore | 26 + .../hashicorp/go-sockaddr/GNUmakefile | 65 + .../github.com/hashicorp/go-sockaddr/LICENSE | 375 + .../hashicorp/go-sockaddr/README.md | 118 + .../github.com/hashicorp/go-sockaddr/doc.go | 5 + .../hashicorp/go-sockaddr/ifaddr.go | 254 + .../hashicorp/go-sockaddr/ifaddrs.go | 1317 + .../hashicorp/go-sockaddr/ifattr.go | 65 + .../hashicorp/go-sockaddr/ipaddr.go | 169 + .../hashicorp/go-sockaddr/ipaddrs.go | 98 + .../hashicorp/go-sockaddr/ipv4addr.go | 516 + .../hashicorp/go-sockaddr/ipv6addr.go | 591 + .../github.com/hashicorp/go-sockaddr/rfc.go | 948 + .../hashicorp/go-sockaddr/route_info.go | 30 + .../hashicorp/go-sockaddr/route_info_aix.go | 35 + .../go-sockaddr/route_info_android.go | 32 + .../hashicorp/go-sockaddr/route_info_bsd.go | 33 + .../go-sockaddr/route_info_default.go | 19 + .../hashicorp/go-sockaddr/route_info_linux.go | 39 + .../go-sockaddr/route_info_solaris.go | 35 + .../go-sockaddr/route_info_test_windows.go | 25 + .../go-sockaddr/route_info_windows.go | 65 + .../hashicorp/go-sockaddr/sockaddr.go | 206 + .../hashicorp/go-sockaddr/sockaddrs.go | 193 + .../hashicorp/go-sockaddr/unixsock.go | 148 + vendor/github.com/hashicorp/hcl/.gitignore | 9 + vendor/github.com/hashicorp/hcl/.travis.yml | 13 + vendor/github.com/hashicorp/hcl/LICENSE | 354 + vendor/github.com/hashicorp/hcl/Makefile | 18 + vendor/github.com/hashicorp/hcl/README.md | 125 + vendor/github.com/hashicorp/hcl/appveyor.yml | 19 + vendor/github.com/hashicorp/hcl/decoder.go | 729 + vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 532 + .../hashicorp/hcl/hcl/scanner/scanner.go | 652 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + .../hashicorp/vault/api/.copywrite.hcl | 8 + vendor/github.com/hashicorp/vault/api/LICENSE | 365 + .../github.com/hashicorp/vault/api/README.md | 9 + 
vendor/github.com/hashicorp/vault/api/auth.go | 115 + .../hashicorp/vault/api/auth_token.go | 377 + .../github.com/hashicorp/vault/api/client.go | 1935 + vendor/github.com/hashicorp/vault/api/help.go | 40 + vendor/github.com/hashicorp/vault/api/kv.go | 59 + .../github.com/hashicorp/vault/api/kv_v1.go | 60 + .../github.com/hashicorp/vault/api/kv_v2.go | 781 + .../hashicorp/vault/api/lifetime_watcher.go | 431 + .../github.com/hashicorp/vault/api/logical.go | 422 + .../hashicorp/vault/api/output_policy.go | 99 + .../hashicorp/vault/api/output_string.go | 98 + .../hashicorp/vault/api/plugin_helpers.go | 219 + .../vault/api/plugin_runtime_types.go | 30 + .../hashicorp/vault/api/plugin_types.go | 100 + .../vault/api/pluginruntimetype_enumer.go | 49 + .../vault/api/renewbehavior_enumer.go | 50 + .../hashicorp/vault/api/replication_status.go | 133 + .../github.com/hashicorp/vault/api/request.go | 155 + .../hashicorp/vault/api/response.go | 137 + .../github.com/hashicorp/vault/api/secret.go | 393 + vendor/github.com/hashicorp/vault/api/ssh.go | 78 + .../hashicorp/vault/api/ssh_agent.go | 276 + .../hashicorp/vault/api/sudo_paths.go | 88 + vendor/github.com/hashicorp/vault/api/sys.go | 14 + .../hashicorp/vault/api/sys_audit.go | 159 + .../hashicorp/vault/api/sys_auth.go | 136 + .../hashicorp/vault/api/sys_capabilities.go | 133 + .../hashicorp/vault/api/sys_config_cors.go | 94 + .../hashicorp/vault/api/sys_generate_root.go | 198 + .../hashicorp/vault/api/sys_hastatus.go | 49 + .../hashicorp/vault/api/sys_health.go | 56 + .../hashicorp/vault/api/sys_init.go | 77 + .../hashicorp/vault/api/sys_leader.go | 44 + .../hashicorp/vault/api/sys_leases.go | 166 + .../github.com/hashicorp/vault/api/sys_mfa.go | 48 + .../hashicorp/vault/api/sys_monitor.go | 74 + .../hashicorp/vault/api/sys_mounts.go | 376 + .../hashicorp/vault/api/sys_plugins.go | 417 + .../vault/api/sys_plugins_runtimes.go | 190 + .../hashicorp/vault/api/sys_policy.go | 137 + .../hashicorp/vault/api/sys_raft.go | 445 + .../hashicorp/vault/api/sys_rekey.go | 482 + .../hashicorp/vault/api/sys_rotate.go | 105 + .../hashicorp/vault/api/sys_seal.go | 123 + .../hashicorp/vault/api/sys_stepdown.go | 26 + .../vault/api/sys_ui_custom_message.go | 281 + vendor/github.com/howeyc/gopass/.travis.yml | 11 - vendor/github.com/howeyc/gopass/LICENSE.txt | 15 - .../howeyc/gopass/OPENSOLARIS.LICENSE | 384 - vendor/github.com/howeyc/gopass/README.md | 27 - vendor/github.com/howeyc/gopass/pass.go | 110 - vendor/github.com/howeyc/gopass/terminal.go | 25 - .../howeyc/gopass/terminal_solaris.go | 69 - .../jmespath/go-jmespath/.gitignore | 4 - .../jmespath/go-jmespath/.travis.yml | 28 - .../github.com/jmespath/go-jmespath/Makefile | 51 - .../github.com/jmespath/go-jmespath/README.md | 87 - vendor/github.com/jmespath/go-jmespath/api.go | 49 - .../go-jmespath/astnodetype_string.go | 16 - .../jmespath/go-jmespath/functions.go | 842 - .../jmespath/go-jmespath/interpreter.go | 418 - .../github.com/jmespath/go-jmespath/lexer.go | 420 - .../github.com/jmespath/go-jmespath/parser.go | 603 - .../jmespath/go-jmespath/toktype_string.go | 16 - .../github.com/jmespath/go-jmespath/util.go | 185 - vendor/github.com/kylelemons/godebug/LICENSE | 202 + .../kylelemons/godebug/diff/diff.go | 186 + .../kylelemons/godebug/pretty/.gitignore | 5 + .../kylelemons/godebug/pretty/doc.go | 25 + .../kylelemons/godebug/pretty/public.go | 188 + .../kylelemons/godebug/pretty/reflect.go | 241 + .../kylelemons/godebug/pretty/structure.go | 223 + .../github.com/mattn/go-colorable/.travis.yml | 9 - 
.../github.com/mattn/go-colorable/README.md | 6 +- .../mattn/go-colorable/colorable_appengine.go | 9 + .../mattn/go-colorable/colorable_others.go | 12 +- .../mattn/go-colorable/colorable_windows.go | 70 +- .../mattn/go-colorable/go.test.sh} | 2 +- .../mattn/go-colorable/noncolorable.go | 12 +- vendor/github.com/mattn/go-isatty/.travis.yml | 13 - vendor/github.com/mattn/go-isatty/README.md | 2 +- vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../mattn/go-isatty/isatty_android.go | 23 - .../github.com/mattn/go-isatty/isatty_bsd.go | 16 +- .../mattn/go-isatty/isatty_others.go | 4 +- .../mattn/go-isatty/isatty_plan9.go | 3 +- .../mattn/go-isatty/isatty_solaris.go | 9 +- .../mattn/go-isatty/isatty_tcgets.go | 5 +- .../mattn/go-isatty/isatty_windows.go | 6 +- .../mitchellh/go-wordwrap/wordwrap.go | 26 +- .../mitchellh/mapstructure/CHANGELOG.md | 96 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 279 + .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 1540 + vendor/github.com/pkg/browser/LICENSE | 23 + vendor/github.com/pkg/browser/README.md | 55 + vendor/github.com/pkg/browser/browser.go | 57 + .../github.com/pkg/browser/browser_darwin.go | 5 + .../github.com/pkg/browser/browser_freebsd.go | 14 + .../github.com/pkg/browser/browser_linux.go | 21 + .../github.com/pkg/browser/browser_netbsd.go | 14 + .../github.com/pkg/browser/browser_openbsd.go | 14 + .../pkg/browser/browser_unsupported.go | 12 + .../github.com/pkg/browser/browser_windows.go | 7 + .../russross/blackfriday/v2/.gitignore | 8 + .../russross/blackfriday/v2/.travis.yml | 17 + .../russross/blackfriday/v2/LICENSE.txt | 29 + .../russross/blackfriday/v2/README.md | 335 + .../russross/blackfriday/v2/block.go | 1612 + .../github.com/russross/blackfriday/v2/doc.go | 46 + .../russross/blackfriday/v2/entities.go | 2236 ++ .../github.com/russross/blackfriday/v2/esc.go | 70 + .../russross/blackfriday/v2/html.go | 952 + .../russross/blackfriday/v2/inline.go | 1228 + .../russross/blackfriday/v2/markdown.go | 950 + .../russross/blackfriday/v2/node.go | 360 + .../russross/blackfriday/v2/smartypants.go | 457 + .../github.com/ryanuber/go-glob/.travis.yml | 5 + vendor/github.com/ryanuber/go-glob/LICENSE | 21 + vendor/github.com/ryanuber/go-glob/README.md | 29 + vendor/github.com/ryanuber/go-glob/glob.go | 56 + vendor/github.com/urfave/cli/.flake8 | 2 + vendor/github.com/urfave/cli/.gitignore | 10 + .../github.com/urfave/cli/CODE_OF_CONDUCT.md | 74 + vendor/github.com/urfave/cli/LICENSE | 21 + vendor/github.com/urfave/cli/README.md | 51 + vendor/github.com/urfave/cli/app.go | 531 + vendor/github.com/urfave/cli/category.go | 44 + vendor/github.com/urfave/cli/cli.go | 22 + vendor/github.com/urfave/cli/command.go | 386 + vendor/github.com/urfave/cli/context.go | 348 + vendor/github.com/urfave/cli/docs.go | 151 + vendor/github.com/urfave/cli/errors.go | 115 + vendor/github.com/urfave/cli/fish.go | 194 + vendor/github.com/urfave/cli/flag.go | 348 + vendor/github.com/urfave/cli/flag_bool.go | 109 + vendor/github.com/urfave/cli/flag_bool_t.go | 110 + vendor/github.com/urfave/cli/flag_duration.go | 106 + vendor/github.com/urfave/cli/flag_float64.go | 106 + vendor/github.com/urfave/cli/flag_generic.go | 110 + vendor/github.com/urfave/cli/flag_int.go | 105 + vendor/github.com/urfave/cli/flag_int64.go | 106 + .../github.com/urfave/cli/flag_int64_slice.go | 199 + .../github.com/urfave/cli/flag_int_slice.go | 198 + 
vendor/github.com/urfave/cli/flag_string.go | 98 + .../urfave/cli/flag_string_slice.go | 184 + vendor/github.com/urfave/cli/flag_uint.go | 106 + vendor/github.com/urfave/cli/flag_uint64.go | 106 + vendor/github.com/urfave/cli/funcs.go | 44 + vendor/github.com/urfave/cli/help.go | 363 + vendor/github.com/urfave/cli/parse.go | 94 + vendor/github.com/urfave/cli/sort.go | 29 + vendor/github.com/urfave/cli/template.go | 121 + vendor/go.mozilla.org/sops/.gitignore | 3 - vendor/go.mozilla.org/sops/.sops.yaml | 2 - vendor/go.mozilla.org/sops/.travis.yml | 54 - vendor/go.mozilla.org/sops/CHANGELOG.rst | 196 - vendor/go.mozilla.org/sops/CODE_OF_CONDUCT.md | 8 - vendor/go.mozilla.org/sops/CONTRIBUTING.md | 30 - vendor/go.mozilla.org/sops/Dockerfile | 10 - vendor/go.mozilla.org/sops/Makefile | 102 - vendor/go.mozilla.org/sops/README.rst | 1481 - vendor/go.mozilla.org/sops/azkv/keysource.go | 285 - vendor/go.mozilla.org/sops/example.ini | 28 - vendor/go.mozilla.org/sops/example.json | 40 - vendor/go.mozilla.org/sops/example.txt | 21 - vendor/go.mozilla.org/sops/example.yaml | 77 - .../go.mozilla.org/sops/gcpkms/keysource.go | 153 - .../sops/keyservice/keyservice.pb.go | 555 - vendor/go.mozilla.org/sops/kms/keysource.go | 279 - .../go.mozilla.org/sops/make_download_page.sh | 11 - vendor/go.mozilla.org/sops/pgp/keysource.go | 363 - .../go.mozilla.org/sops/stores/yaml/store.go | 207 - vendor/go.opencensus.io/.travis.yml | 17 - vendor/go.opencensus.io/Gopkg.lock | 231 - vendor/go.opencensus.io/Gopkg.toml | 36 - vendor/go.opencensus.io/Makefile | 31 +- vendor/go.opencensus.io/README.md | 4 + vendor/go.opencensus.io/appveyor.yml | 7 +- vendor/go.opencensus.io/opencensus.go | 2 +- .../go.opencensus.io/plugin/ocgrpc/client.go | 56 + .../plugin/ocgrpc/client_metrics.go | 118 + .../plugin/ocgrpc/client_stats_handler.go | 49 + .../plugin/ocgrpc/doc.go} | 18 +- .../go.opencensus.io/plugin/ocgrpc/server.go | 81 + .../plugin/ocgrpc/server_metrics.go | 108 + .../plugin/ocgrpc/server_stats_handler.go | 63 + .../plugin/ocgrpc/stats_common.go | 248 + .../plugin/ocgrpc/trace_common.go | 107 + .../plugin/ochttp/propagation/b3/b3.go | 4 +- .../go.opencensus.io/plugin/ochttp/server.go | 16 +- .../go.opencensus.io/plugin/ochttp/trace.go | 3 + vendor/go.opencensus.io/stats/doc.go | 7 +- .../go.opencensus.io/stats/internal/record.go | 6 + vendor/go.opencensus.io/stats/record.go | 41 +- vendor/go.opencensus.io/stats/units.go | 1 + .../stats/view/aggregation.go | 29 +- .../stats/view/aggregation_data.go | 59 +- .../go.opencensus.io/stats/view/collector.go | 11 +- vendor/go.opencensus.io/stats/view/doc.go | 2 +- vendor/go.opencensus.io/stats/view/export.go | 17 +- .../stats/view/view_to_metric.go | 16 +- vendor/go.opencensus.io/stats/view/worker.go | 245 +- .../stats/view/worker_commands.go | 6 +- vendor/go.opencensus.io/tag/profile_19.go | 1 + vendor/go.opencensus.io/tag/profile_not19.go | 1 + vendor/go.opencensus.io/trace/basetypes.go | 10 + vendor/go.opencensus.io/trace/doc.go | 13 +- vendor/go.opencensus.io/trace/lrumap.go | 2 +- vendor/go.opencensus.io/trace/spanstore.go | 14 +- vendor/go.opencensus.io/trace/trace.go | 187 +- vendor/go.opencensus.io/trace/trace_api.go | 265 + vendor/go.opencensus.io/trace/trace_go11.go | 1 + .../go.opencensus.io/trace/trace_nongo11.go | 1 + .../google.golang.org/grpc/otelgrpc}/LICENSE | 14 +- .../google.golang.org/grpc/otelgrpc/config.go | 287 + .../google.golang.org/grpc/otelgrpc/doc.go | 11 + .../grpc/otelgrpc/interceptor.go | 530 + .../grpc/otelgrpc/interceptorinfo.go | 39 + 
.../grpc/otelgrpc/internal/parse.go | 40 + .../grpc/otelgrpc/metadata_supplier.go | 87 + .../grpc/otelgrpc/semconv.go | 41 + .../grpc/otelgrpc/stats_handler.go | 222 + .../grpc/otelgrpc/version.go | 17 + .../instrumentation/net/http/otelhttp/LICENSE | 201 + .../net/http/otelhttp/client.go | 50 + .../net/http/otelhttp/common.go | 34 + .../net/http/otelhttp/config.go | 207 + .../instrumentation/net/http/otelhttp/doc.go | 7 + .../net/http/otelhttp/handler.go | 217 + .../otelhttp/internal/request/body_wrapper.go | 75 + .../internal/request/resp_writer_wrapper.go | 119 + .../net/http/otelhttp/internal/semconv/env.go | 165 + .../otelhttp/internal/semconv/httpconv.go | 348 + .../http/otelhttp/internal/semconv/util.go | 98 + .../http/otelhttp/internal/semconv/v1.20.0.go | 192 + .../http/otelhttp/internal/semconvutil/gen.go | 10 + .../otelhttp/internal/semconvutil/httpconv.go | 575 + .../otelhttp/internal/semconvutil/netconv.go | 205 + .../net/http/otelhttp/labeler.go | 58 + .../net/http/otelhttp/transport.go | 295 + .../net/http/otelhttp/version.go | 17 + .../go.opentelemetry.io/otel/.codespellignore | 9 + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + .../go.opentelemetry.io/otel/.gitattributes | 3 + vendor/go.opentelemetry.io/otel/.gitignore | 22 + vendor/go.opentelemetry.io/otel/.golangci.yml | 304 + vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdownlint.yaml | 29 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 3182 ++ vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 661 + vendor/go.opentelemetry.io/otel/LICENSE | 201 + vendor/go.opentelemetry.io/otel/Makefile | 300 + vendor/go.opentelemetry.io/otel/README.md | 111 + vendor/go.opentelemetry.io/otel/RELEASING.md | 146 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 + .../otel/attribute/README.md | 3 + .../go.opentelemetry.io/otel/attribute/doc.go | 5 + .../otel/attribute/encoder.go | 135 + .../otel/attribute/filter.go | 49 + .../otel/attribute/iterator.go | 150 + .../go.opentelemetry.io/otel/attribute/key.go | 123 + .../go.opentelemetry.io/otel/attribute/kv.go | 75 + .../go.opentelemetry.io/otel/attribute/set.go | 431 + .../otel/attribute/type_string.go | 31 + .../otel/attribute/value.go | 271 + .../otel/baggage/README.md | 3 + .../otel/baggage/baggage.go | 1018 + .../otel/baggage/context.go | 28 + .../go.opentelemetry.io/otel/baggage/doc.go | 9 + .../go.opentelemetry.io/otel/codes/README.md | 3 + .../go.opentelemetry.io/otel/codes/codes.go | 105 + vendor/go.opentelemetry.io/otel/codes/doc.go | 10 + vendor/go.opentelemetry.io/otel/doc.go | 25 + .../go.opentelemetry.io/otel/error_handler.go | 27 + .../go.opentelemetry.io/otel/get_main_pkgs.sh | 30 + vendor/go.opentelemetry.io/otel/handler.go | 33 + .../otel/internal/attribute/attribute.go | 100 + .../otel/internal/baggage/baggage.go | 32 + .../otel/internal/baggage/context.go | 81 + .../go.opentelemetry.io/otel/internal/gen.go | 18 + .../otel/internal/global/handler.go | 36 + .../otel/internal/global/instruments.go | 412 + .../otel/internal/global/internal_logging.go | 62 + .../otel/internal/global/meter.go | 506 + .../otel/internal/global/propagator.go | 71 + .../otel/internal/global/state.go | 199 + .../otel/internal/global/trace.go | 189 + .../otel/internal/rawhelpers.go | 47 + .../otel/internal_logging.go | 15 + vendor/go.opentelemetry.io/otel/metric.go | 42 + .../go.opentelemetry.io/otel/metric/LICENSE | 201 + .../go.opentelemetry.io/otel/metric/README.md | 3 + .../otel/metric/asyncfloat64.go | 260 + 
.../otel/metric/asyncint64.go | 258 + .../go.opentelemetry.io/otel/metric/config.go | 81 + vendor/go.opentelemetry.io/otel/metric/doc.go | 177 + .../otel/metric/embedded/README.md | 3 + .../otel/metric/embedded/embedded.go | 243 + .../otel/metric/instrument.go | 368 + .../go.opentelemetry.io/otel/metric/meter.go | 278 + .../otel/metric/noop/README.md | 3 + .../otel/metric/noop/noop.go | 281 + .../otel/metric/syncfloat64.go | 226 + .../otel/metric/syncint64.go | 226 + .../go.opentelemetry.io/otel/propagation.go | 20 + .../otel/propagation/README.md | 3 + .../otel/propagation/baggage.go | 47 + .../otel/propagation/doc.go | 13 + .../otel/propagation/propagation.go | 142 + .../otel/propagation/trace_context.go | 156 + vendor/go.opentelemetry.io/otel/renovate.json | 28 + .../go.opentelemetry.io/otel/requirements.txt | 1 + .../otel/semconv/v1.17.0/README.md | 3 + .../otel/semconv/v1.17.0/doc.go | 9 + .../otel/semconv/v1.17.0/event.go | 188 + .../otel/semconv/v1.17.0/exception.go | 9 + .../otel/semconv/v1.17.0/http.go | 10 + .../otel/semconv/v1.17.0/resource.go | 1999 + .../otel/semconv/v1.17.0/schema.go | 9 + .../otel/semconv/v1.17.0/trace.go | 3364 ++ .../otel/semconv/v1.20.0/README.md | 3 + .../otel/semconv/v1.20.0/attribute_group.go | 1198 + .../otel/semconv/v1.20.0/doc.go | 9 + .../otel/semconv/v1.20.0/event.go | 188 + .../otel/semconv/v1.20.0/exception.go | 9 + .../otel/semconv/v1.20.0/http.go | 10 + .../otel/semconv/v1.20.0/resource.go | 2060 ++ .../otel/semconv/v1.20.0/schema.go | 9 + .../otel/semconv/v1.20.0/trace.go | 2599 ++ .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 +++++ .../otel/semconv/v1.26.0/doc.go | 9 + .../otel/semconv/v1.26.0/exception.go | 9 + .../otel/semconv/v1.26.0/metric.go | 1307 + .../otel/semconv/v1.26.0/schema.go | 9 + vendor/go.opentelemetry.io/otel/trace.go | 36 + vendor/go.opentelemetry.io/otel/trace/LICENSE | 201 + .../go.opentelemetry.io/otel/trace/README.md | 3 + .../go.opentelemetry.io/otel/trace/config.go | 323 + .../go.opentelemetry.io/otel/trace/context.go | 50 + vendor/go.opentelemetry.io/otel/trace/doc.go | 119 + .../otel/trace/embedded/README.md | 3 + .../otel/trace/embedded/embedded.go | 45 + .../otel/trace/nonrecording.go | 16 + vendor/go.opentelemetry.io/otel/trace/noop.go | 85 + .../otel/trace/provider.go | 59 + vendor/go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 323 + .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 330 + .../otel/verify_examples.sh | 74 + .../otel/verify_readmes.sh | 21 + .../otel/verify_released_changelog.sh | 42 + vendor/go.opentelemetry.io/otel/version.go | 9 + vendor/go.opentelemetry.io/otel/versions.yaml | 49 + vendor/golang.org/x/crypto/LICENSE | 4 +- vendor/golang.org/x/crypto/argon2/argon2.go | 283 + vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 60 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 2791 ++ .../x/crypto/argon2/blamka_generic.go | 163 + .../golang.org/x/crypto/argon2/blamka_ref.go | 15 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 4559 +++ .../x/crypto/blake2b/blake2b_amd64.s | 1441 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 11 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 30 + vendor/golang.org/x/crypto/cast5/cast5.go | 2 +- 
.../x/crypto/chacha20/chacha_arm64.go | 16 + .../x/crypto/chacha20/chacha_arm64.s | 307 + .../x/crypto/chacha20/chacha_generic.go | 398 + .../x/crypto/chacha20/chacha_noasm.go | 13 + .../x/crypto/chacha20/chacha_ppc64le.go | 16 + .../x/crypto/chacha20/chacha_ppc64le.s | 443 + .../x/crypto/chacha20/chacha_s390x.go | 27 + .../x/crypto/chacha20/chacha_s390x.s | 224 + vendor/golang.org/x/crypto/chacha20/xor.go | 42 + .../chacha20poly1305/chacha20poly1305.go | 98 + .../chacha20poly1305_amd64.go | 86 + .../chacha20poly1305/chacha20poly1305_amd64.s | 9762 +++++ .../chacha20poly1305_generic.go | 81 + .../chacha20poly1305_noasm.go | 15 + .../chacha20poly1305/xchacha20poly1305.go | 86 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 825 + .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 350 + .../golang.org/x/crypto/cryptobyte/string.go | 183 + .../x/crypto/curve25519/curve25519.go | 90 + vendor/golang.org/x/crypto/hkdf/hkdf.go | 95 + .../x/crypto/internal/alias/alias.go | 31 + .../x/crypto/internal/alias/alias_purego.go | 34 + .../x/crypto/internal/poly1305/mac_noasm.go | 9 + .../x/crypto/internal/poly1305/poly1305.go | 99 + .../x/crypto/internal/poly1305/sum_amd64.go | 47 + .../x/crypto/internal/poly1305/sum_amd64.s | 93 + .../x/crypto/internal/poly1305/sum_generic.go | 312 + .../x/crypto/internal/poly1305/sum_ppc64le.go | 47 + .../x/crypto/internal/poly1305/sum_ppc64le.s | 179 + .../x/crypto/internal/poly1305/sum_s390x.go | 76 + .../x/crypto/internal/poly1305/sum_s390x.s | 503 + .../x/crypto/openpgp/errors/errors.go | 78 - vendor/golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 208 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/packet.go | 590 - .../x/crypto/openpgp/packet/private_key.go | 384 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - vendor/golang.org/x/crypto/openpgp/read.go | 448 - vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 - vendor/golang.org/x/crypto/openpgp/write.go | 418 - vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../x/crypto/poly1305/poly1305_compat.go | 91 + vendor/golang.org/x/crypto/scrypt/scrypt.go | 212 + vendor/golang.org/x/crypto/sha3/doc.go | 62 + vendor/golang.org/x/crypto/sha3/hashes.go | 109 + .../golang.org/x/crypto/sha3/hashes_noasm.go | 23 + vendor/golang.org/x/crypto/sha3/keccakf.go | 414 + .../golang.org/x/crypto/sha3/keccakf_amd64.go | 13 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5419 +++ vendor/golang.org/x/crypto/sha3/sha3.go | 185 + vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 303 + vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 33 + vendor/golang.org/x/crypto/sha3/shake.go | 174 + .../golang.org/x/crypto/sha3/shake_noasm.go | 15 + vendor/golang.org/x/crypto/sha3/xor.go | 40 + .../x/crypto/ssh/terminal/terminal.go | 76 - vendor/golang.org/x/net/LICENSE | 4 +- .../x/net/context/ctxhttp/ctxhttp.go | 71 - .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/frame.go | 13 +- vendor/golang.org/x/net/http2/http2.go | 19 +- vendor/golang.org/x/net/http2/server.go | 105 +- vendor/golang.org/x/net/http2/testsync.go | 331 - vendor/golang.org/x/net/http2/timer.go | 20 + 
vendor/golang.org/x/net/http2/transport.go | 329 +- .../x/net/http2/writesched_priority.go | 4 +- vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/README.md | 29 +- .../x/oauth2/authhandler/authhandler.go | 94 + vendor/golang.org/x/oauth2/deviceauth.go | 198 + .../golang.org/x/oauth2/google/appengine.go | 20 +- .../x/oauth2/google/appengine_gen1.go | 77 - .../x/oauth2/google/appengine_gen2_flex.go | 27 - vendor/golang.org/x/oauth2/google/default.go | 257 +- vendor/golang.org/x/oauth2/google/doc.go | 21 +- vendor/golang.org/x/oauth2/google/error.go | 64 + .../x/oauth2/google/externalaccount/aws.go | 577 + .../google/externalaccount/basecredentials.go | 485 + .../externalaccount/executablecredsource.go | 313 + .../google/externalaccount/filecredsource.go | 61 + .../x/oauth2/google/externalaccount/header.go | 64 + .../programmaticrefreshcredsource.go | 21 + .../google/externalaccount/urlcredsource.go | 79 + vendor/golang.org/x/oauth2/google/google.go | 141 +- .../externalaccountauthorizeduser.go | 114 + .../internal/impersonate/impersonate.go | 105 + .../google/internal/stsexchange/clientauth.go | 45 + .../internal/stsexchange/sts_exchange.go | 125 + vendor/golang.org/x/oauth2/google/jwt.go | 40 +- vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- vendor/golang.org/x/oauth2/internal/token.go | 134 +- .../golang.org/x/oauth2/internal/transport.go | 5 - vendor/golang.org/x/oauth2/jws/jws.go | 2 +- vendor/golang.org/x/oauth2/oauth2.go | 64 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 42 +- vendor/golang.org/x/oauth2/transport.go | 79 +- vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + .../golang.org/x/sync/semaphore/semaphore.go | 160 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/cpu/byteorder.go | 66 + vendor/golang.org/x/sys/cpu/cpu.go | 312 + vendor/golang.org/x/sys/cpu/cpu_aix.go | 33 + vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 194 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 39 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 + vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 15 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 37 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 31 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 + vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 121 + .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 30 + .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 137 + .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 + vendor/golang.org/x/sys/cpu/cpu_loong64.go | 12 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 15 + .../cpu/cpu_mipsx.go} | 10 +- .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 + .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 9 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 + .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 11 + .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 12 + .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 11 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 20 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + 
vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 151 + vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 + vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + vendor/golang.org/x/sys/cpu/endian_big.go | 10 + vendor/golang.org/x/sys/cpu/endian_little.go | 10 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 71 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 53 + vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 + .../x/sys/cpu/runtime_auxv_go121.go | 18 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 26 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 35 + vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../golang.org/x/sys/unix/syscall_darwin.go | 37 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 7 + .../x/sys/unix/zerrors_darwin_arm64.go | 7 + .../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 20 + .../x/sys/unix/zsyscall_darwin_amd64.s | 5 + .../x/sys/unix/zsyscall_darwin_arm64.go | 20 + .../x/sys/unix/zsyscall_darwin_arm64.s | 5 + .../x/sys/unix/ztypes_darwin_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_arm64.go | 13 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 33 + .../golang.org/x/sys/windows/registry/key.go | 205 + .../x/sys/windows/registry/mksyscall.go | 9 + .../x/sys/windows/registry/syscall.go | 32 + .../x/sys/windows/registry/value.go | 386 + .../sys/windows/registry/zsyscall_windows.go | 117 + .../x/sys/windows/syscall_windows.go | 4 + .../golang.org/x/sys/windows/types_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 38 + vendor/golang.org/x/term/LICENSE | 4 +- vendor/golang.org/x/term/term_windows.go | 1 + vendor/golang.org/x/text/LICENSE | 4 +- vendor/golang.org/x/time/LICENSE | 27 + vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 430 + vendor/golang.org/x/time/rate/sometimes.go | 67 + vendor/google.golang.org/api/AUTHORS | 1 + vendor/google.golang.org/api/CONTRIBUTORS | 1 + .../api/cloudkms/v1/cloudkms-api.json | 2225 -- .../api/cloudkms/v1/cloudkms-gen.go | 7819 ---- .../api/googleapi/googleapi.go | 113 +- .../api/googleapi/transport/apikey.go | 8 +- .../google.golang.org/api/googleapi/types.go | 2 +- .../iamcredentials/v1/iamcredentials-api.json | 372 + .../iamcredentials/v1/iamcredentials-gen.go | 868 + vendor/google.golang.org/api/internal/cba.go | 318 + .../api/internal/cert/default_cert.go | 58 + .../api/internal/cert/enterprise_cert.go | 54 + .../api/internal/cert/secureconnect_cert.go | 122 + .../api/internal/conn_pool.go | 30 + .../google.golang.org/api/internal/creds.go | 292 +- .../api/internal/gensupport/error.go | 24 + .../api/internal/gensupport/json.go | 33 +- .../api/internal/gensupport/jsonfloat.go | 16 +- .../api/internal/gensupport/media.go | 138 +- .../api/internal/gensupport/params.go | 31 +- .../api/internal/gensupport/resumable.go | 120 +- .../api/internal/gensupport/retry.go | 123 + .../api/internal/gensupport/send.go | 201 +- .../api/internal/gensupport/version.go | 53 + .../api/internal/impersonate/impersonate.go | 127 + vendor/google.golang.org/api/internal/pool.go | 61 - 
vendor/google.golang.org/api/internal/s2a.go | 136 + .../api/internal/service-account.json | 12 - .../api/internal/settings.go | 204 +- .../internal/third_party/uritemplates/LICENSE | 27 + .../third_party/uritemplates/METADATA | 14 + .../third_party}/uritemplates/uritemplates.go | 2 +- .../third_party}/uritemplates/utils.go | 0 .../google.golang.org/api/internal/version.go | 8 + .../api/iterator/iterator.go | 227 + .../option/internaloption/internaloption.go | 214 + vendor/google.golang.org/api/option/option.go | 173 +- .../api/storage/v1/storage-api.json | 6005 +++ .../api/storage/v1/storage-gen.go | 13148 +++++++ .../google.golang.org/api/transport/dial.go | 48 + vendor/google.golang.org/api/transport/doc.go | 11 + .../api/transport/grpc/dial.go | 534 + .../api/transport/grpc/dial_socketopt.go | 52 + .../api/transport/grpc/pool.go | 117 + .../api/transport/http/dial.go | 244 +- .../http/internal/propagation/http.go | 17 +- .../google.golang.org/appengine/.travis.yml | 20 - vendor/google.golang.org/appengine/README.md | 100 - .../google.golang.org/appengine/appengine.go | 135 - .../appengine/appengine_vm.go | 20 - vendor/google.golang.org/appengine/errors.go | 46 - .../google.golang.org/appengine/identity.go | 142 - .../appengine/internal/api.go | 675 - .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../app_identity/app_identity_service.pb.go | 611 - .../app_identity/app_identity_service.proto | 64 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 --- .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 - .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../internal/modules/modules_service.pb.go | 786 - .../internal/modules/modules_service.proto | 80 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 361 - .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../google.golang.org/appengine/namespace.go | 25 - vendor/google.golang.org/appengine/timeout.go | 20 - .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 210 - .../genproto/googleapis/api/LICENSE | 202 + .../api/annotations/annotations.pb.go | 119 + .../googleapis/api/annotations/client.pb.go | 1940 + .../api/annotations/field_behavior.pb.go | 266 + .../api/annotations/field_info.pb.go | 392 + .../googleapis/api/annotations/http.pb.go | 774 + .../googleapis/api/annotations/resource.pb.go | 660 + .../googleapis/api/annotations/routing.pb.go | 693 + .../googleapis/api/launch_stage.pb.go | 203 + .../googleapis/cloud/location/locations.pb.go | 631 + .../genproto/googleapis/rpc/LICENSE | 202 + .../genproto/googleapis/rpc/code/code.pb.go | 336 + .../rpc/errdetails/error_details.pb.go | 1314 + .../googleapis/rpc/status/status.pb.go | 272 +- 
.../genproto/googleapis/type/date/date.pb.go | 200 + .../genproto/googleapis/type/expr/expr.pb.go | 232 + vendor/google.golang.org/grpc/.travis.yml | 39 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 33 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 35 +- vendor/google.golang.org/grpc/Makefile | 37 +- .../grpc/NOTICE.txt} | 2 +- vendor/google.golang.org/grpc/README.md | 120 +- vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 141 + vendor/google.golang.org/grpc/backoff.go | 23 + .../google.golang.org/grpc/backoff/backoff.go | 52 + vendor/google.golang.org/grpc/balancer.go | 391 - .../grpc/balancer/balancer.go | 390 +- .../grpc/balancer/base/balancer.go | 190 +- .../grpc/balancer/base/base.go | 25 +- .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpclb/grpc_lb_v1/load_balancer.pb.go | 957 + .../grpc_lb_v1/load_balancer_grpc.pb.go | 134 + .../grpc/balancer/grpclb/grpclb.go | 535 + .../grpc/balancer/grpclb/grpclb_config.go | 67 + .../grpc/balancer/grpclb/grpclb_picker.go | 193 + .../balancer/grpclb/grpclb_remote_balancer.go | 458 + .../grpc/balancer/grpclb/grpclb_util.go | 173 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/pickfirst/pickfirst.go | 283 + .../grpc/balancer/roundrobin/roundrobin.go | 38 +- .../grpc/balancer_conn_wrappers.go | 318 - .../grpc/balancer_v1_wrapper.go | 334 - .../grpc/balancer_wrapper.go | 365 + .../grpc_binarylog_v1/binarylog.pb.go | 1381 +- vendor/google.golang.org/grpc/call.go | 6 +- .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 1850 +- vendor/google.golang.org/grpc/codec.go | 73 +- vendor/google.golang.org/grpc/codegen.sh | 17 - .../grpc/codes/code_string.go | 51 +- vendor/google.golang.org/grpc/codes/codes.go | 56 +- .../grpc/connectivity/connectivity.go | 47 +- .../grpc/credentials/alts/alts.go | 332 + .../alts/internal/authinfo/authinfo.go | 95 + .../grpc/credentials/alts/internal/common.go | 67 + .../alts/internal/conn/aeadrekey.go | 131 + .../alts/internal/conn/aes128gcm.go | 105 + .../alts/internal/conn/aes128gcmrekey.go | 116 + .../credentials/alts/internal/conn/common.go | 70 + .../credentials/alts/internal/conn/counter.go | 62 + .../credentials/alts/internal/conn/record.go | 268 + .../credentials/alts/internal/conn/utils.go | 63 + .../alts/internal/handshaker/handshaker.go | 373 + .../internal/handshaker/service/service.go | 76 + .../internal/proto/grpc_gcp/altscontext.pb.go | 259 + .../internal/proto/grpc_gcp/handshaker.pb.go | 1467 + .../proto/grpc_gcp/handshaker_grpc.pb.go | 144 + .../grpc_gcp/transport_security_common.pb.go | 321 + .../grpc/credentials/alts/utils.go | 70 + .../grpc/credentials/credentials.go | 353 +- .../grpc/credentials/google/google.go | 145 + .../grpc/credentials/google/xds.go | 128 + .../grpc/credentials/insecure/insecure.go | 98 + .../grpc/credentials/oauth/oauth.go | 244 + .../google.golang.org/grpc/credentials/tls.go | 283 + vendor/google.golang.org/grpc/dialoptions.go | 447 +- vendor/google.golang.org/grpc/doc.go | 2 + .../grpc/encoding/encoding.go | 25 +- .../grpc/encoding/encoding_v2.go | 81 + .../grpc/encoding/proto/proto.go | 110 +- .../grpc/experimental/stats/metricregistry.go | 269 + .../grpc/experimental/stats/metrics.go | 114 + .../grpc/grpclog/component.go | 115 + .../google.golang.org/grpc/grpclog/grpclog.go | 144 +- .../grpc/grpclog/internal/grpclog.go | 26 + .../grpc/grpclog/internal/logger.go | 87 + .../grpc/grpclog/internal/loggerv2.go | 204 + .../google.golang.org/grpc/grpclog/logger.go | 59 +- 
.../grpc/grpclog/loggerv2.go | 154 +- vendor/google.golang.org/grpc/install_gae.sh | 6 - vendor/google.golang.org/grpc/interceptor.go | 57 +- .../grpc/internal/backoff/backoff.go | 65 +- .../balancer/gracefulswitch/config.go | 82 + .../balancer/gracefulswitch/gracefulswitch.go | 419 + .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/binarylog.go | 133 +- .../grpc/internal/binarylog/env_config.go | 34 +- .../grpc/internal/binarylog/method_logger.go | 193 +- .../grpc/internal/binarylog/sink.go | 120 +- .../grpc/internal/binarylog/util.go | 41 - .../grpc/internal/buffer/unbounded.go | 116 + .../grpc/internal/channelz/channel.go | 255 + .../grpc/internal/channelz/channelmap.go | 395 + .../grpc/internal/channelz/funcs.go | 755 +- .../grpc/internal/channelz/logging.go | 75 + .../grpc/internal/channelz/server.go | 119 + .../grpc/internal/channelz/socket.go | 130 + .../grpc/internal/channelz/subchannel.go | 151 + .../{types_linux.go => syscall_linux.go} | 16 +- ...{types_nonlinux.go => syscall_nonlinux.go} | 13 +- .../grpc/internal/channelz/trace.go | 204 + .../grpc/internal/channelz/types.go | 702 - .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 75 + .../credentials}/syscallconn.go | 5 +- .../grpc/internal/credentials/util.go | 52 + .../grpc/internal/envconfig/envconfig.go | 55 +- .../grpc/internal/envconfig/observability.go | 42 + .../grpc/internal/envconfig/xds.go | 56 + .../grpc/internal/experimental.go | 28 + .../grpc/internal/googlecloud/googlecloud.go | 72 + .../manufacturer.go} | 12 +- .../googlecloud/manufacturer_linux.go} | 17 +- .../googlecloud/manufacturer_windows.go | 50 + .../grpc/internal/grpclog/prefix_logger.go | 79 + .../grpc/internal/grpcrand/grpcrand.go | 56 - .../internal/grpcsync/callback_serializer.go | 112 + .../util_linux.go => grpcsync/oncefunc.go} | 25 +- .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/grpcutil/compressor.go | 42 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 88 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/idle/idle.go | 278 + .../grpc/internal/internal.go | 216 +- .../grpc/internal/metadata/metadata.go | 132 + .../grpc/internal/pretty/pretty.go | 73 + .../grpc/internal/resolver/config_selector.go | 167 + .../internal/resolver/dns/dns_resolver.go | 458 + .../resolver/dns/internal/internal.go | 77 + .../resolver/passthrough/passthrough.go | 15 +- .../grpc/internal/resolver/unix/unix.go | 78 + .../grpc/internal/serviceconfig/duration.go | 130 + .../internal/serviceconfig/serviceconfig.go | 180 + .../grpc/internal/stats/labels.go | 42 + .../internal/stats/metrics_recorder_list.go | 95 + .../grpc/internal/status/status.go | 205 + .../grpc/internal/syscall/syscall_linux.go | 28 +- .../grpc/internal/syscall/syscall_nonlinux.go | 32 +- .../tcp_keepalive_others.go} | 11 +- .../grpc/internal/tcp_keepalive_unix.go | 54 + .../grpc/internal/tcp_keepalive_windows.go | 54 + .../grpc/internal/transport/controlbuf.go | 471 +- .../grpc/internal/transport/defaults.go | 6 + .../grpc/internal/transport/flowcontrol.go | 4 +- .../grpc/internal/transport/handler_server.go | 275 +- .../grpc/internal/transport/http2_client.go | 1031 +- .../grpc/internal/transport/http2_server.go | 948 +- .../grpc/internal/transport/http_util.go | 463 +- .../grpc/internal/transport/log.go | 44 - .../grpc/internal/transport/logging.go | 40 + 
.../transport/networktype/networktype.go | 46 + .../grpc/{ => internal/transport}/proxy.go | 68 +- .../grpc/internal/transport/transport.go | 446 +- .../grpc/internal/xds/xds.go | 42 + .../grpc/keepalive/keepalive.go | 20 +- .../google.golang.org/grpc/mem/buffer_pool.go | 194 + .../grpc/mem/buffer_slice.go | 226 + vendor/google.golang.org/grpc/mem/buffers.go | 252 + .../grpc/metadata/metadata.go | 180 +- .../grpc/naming/dns_resolver.go | 293 - .../google.golang.org/grpc/naming/naming.go | 68 - vendor/google.golang.org/grpc/peer/peer.go | 32 + .../google.golang.org/grpc/picker_wrapper.go | 220 +- vendor/google.golang.org/grpc/pickfirst.go | 118 - vendor/google.golang.org/grpc/preloader.go | 35 +- .../grpc/resolver/dns/dns_resolver.go | 447 +- .../grpc/resolver/manual/manual.go | 128 + vendor/google.golang.org/grpc/resolver/map.go | 251 + .../grpc/resolver/resolver.go | 263 +- .../grpc/resolver_conn_wrapper.go | 168 - .../grpc/resolver_wrapper.go | 201 + vendor/google.golang.org/grpc/rpc_util.go | 644 +- vendor/google.golang.org/grpc/server.go | 1614 +- .../google.golang.org/grpc/service_config.go | 327 +- .../grpc/serviceconfig/serviceconfig.go | 26 +- vendor/google.golang.org/grpc/stats/stats.go | 83 +- .../google.golang.org/grpc/status/status.go | 204 +- vendor/google.golang.org/grpc/stream.go | 986 +- .../grpc/stream_interfaces.go | 238 + vendor/google.golang.org/grpc/tap/tap.go | 29 +- vendor/google.golang.org/grpc/trace.go | 35 +- .../google.golang.org/grpc/trace_notrace.go | 52 + .../google.golang.org/grpc/trace_withtrace.go | 39 + vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 135 - vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/protojson/decode.go | 685 + .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 382 + .../encoding/protojson/well_known_types.go | 876 + .../protobuf/encoding/prototext/decode.go | 772 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 380 + .../protobuf/encoding/protowire/wire.go | 547 + .../protobuf/internal/descfmt/stringer.go | 414 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 69 + .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults/editions_defaults.binpb | Bin 0 -> 93 bytes .../internal/editionssupport/editions.go | 13 + .../internal/encoding/defval/default.go | 213 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../encoding/messageset/messageset.go | 242 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 686 + .../internal/encoding/text/decode_number.go | 211 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 272 + .../protobuf/internal/errors/errors.go | 104 + .../protobuf/internal/errors/is_go112.go | 40 + .../protobuf/internal/errors/is_go113.go | 13 + .../protobuf/internal/filedesc/build.go | 157 + .../protobuf/internal/filedesc/desc.go | 733 + .../protobuf/internal/filedesc/desc_init.go | 558 + .../protobuf/internal/filedesc/desc_lazy.go | 701 + .../protobuf/internal/filedesc/desc_list.go | 
457 + .../internal/filedesc/desc_list_gen.go | 367 + .../protobuf/internal/filedesc/editions.go | 156 + .../protobuf/internal/filedesc/placeholder.go | 110 + .../protobuf/internal/filetype/build.go | 296 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 10 + .../internal/flags/proto_legacy_enable.go | 10 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 1270 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 121 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 228 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 237 + .../protobuf/internal/impl/codec_field.go | 860 + .../protobuf/internal/impl/codec_gen.go | 5724 +++ .../protobuf/internal/impl/codec_map.go | 399 + .../protobuf/internal/impl/codec_map_go111.go | 38 + .../protobuf/internal/impl/codec_map_go112.go | 12 + .../protobuf/internal/impl/codec_message.go | 217 + .../internal/impl/codec_messageset.go | 145 + .../protobuf/internal/impl/codec_reflect.go | 210 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 18 + .../protobuf/internal/impl/convert.go | 495 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 285 + .../protobuf/internal/impl/encode.go | 237 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 176 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 572 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 284 + .../protobuf/internal/impl/message_reflect.go | 462 + .../internal/impl/message_reflect_field.go | 543 + .../internal/impl/message_reflect_gen.go | 271 + .../protobuf/internal/impl/pointer_reflect.go | 215 + .../protobuf/internal/impl/pointer_unsafe.go | 215 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 28 + .../internal/strs/strings_unsafe_go120.go | 95 + .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 296 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 86 + .../protobuf/proto/encode.go | 354 + .../protobuf/proto/encode_gen.go | 97 + 
.../google.golang.org/protobuf/proto/equal.go | 57 + .../protobuf/proto/extension.go | 95 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 98 + .../google.golang.org/protobuf/proto/proto.go | 45 + .../protobuf/proto/proto_methods.go | 20 + .../protobuf/proto/proto_reflect.go | 20 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 103 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/protoadapt/convert.go | 31 + .../protobuf/reflect/protodesc/desc.go | 286 + .../protobuf/reflect/protodesc/desc_init.go | 285 + .../reflect/protodesc/desc_resolve.go | 291 + .../reflect/protodesc/desc_validate.go | 371 + .../protobuf/reflect/protodesc/editions.go | 145 + .../protobuf/reflect/protodesc/proto.go | 274 + .../protobuf/reflect/protoreflect/methods.go | 78 + .../protobuf/reflect/protoreflect/proto.go | 513 + .../protobuf/reflect/protoreflect/source.go | 129 + .../reflect/protoreflect/source_gen.go | 573 + .../protobuf/reflect/protoreflect/type.go | 672 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_equal.go | 168 + .../reflect/protoreflect/value_pure.go | 60 + .../reflect/protoreflect/value_union.go | 438 + .../protoreflect/value_unsafe_go120.go | 99 + .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 882 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 168 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 60 + .../types/descriptorpb/descriptor.pb.go | 5825 +++ .../types/gofeaturespb/go_features.pb.go | 181 + .../protobuf/types/known/anypb/any.pb.go | 496 + .../types/known/durationpb/duration.pb.go | 374 + .../protobuf/types/known/emptypb/empty.pb.go | 166 + .../types/known/fieldmaskpb/field_mask.pb.go | 588 + .../types/known/timestamppb/timestamp.pb.go | 383 + .../types/known/wrapperspb/wrappers.pb.go | 760 + vendor/gopkg.in/ini.v1/.editorconfig | 12 + vendor/gopkg.in/ini.v1/.gitignore | 7 + vendor/gopkg.in/ini.v1/.golangci.yml | 27 + vendor/gopkg.in/ini.v1/LICENSE | 191 + vendor/gopkg.in/ini.v1/Makefile | 15 + vendor/gopkg.in/ini.v1/README.md | 43 + vendor/gopkg.in/ini.v1/codecov.yml | 16 + vendor/gopkg.in/ini.v1/data_source.go | 76 + vendor/gopkg.in/ini.v1/deprecated.go | 22 + vendor/gopkg.in/ini.v1/error.go | 49 + vendor/gopkg.in/ini.v1/file.go | 541 + vendor/gopkg.in/ini.v1/helper.go | 24 + vendor/gopkg.in/ini.v1/ini.go | 176 + vendor/gopkg.in/ini.v1/key.go | 837 + vendor/gopkg.in/ini.v1/parser.go | 520 + vendor/gopkg.in/ini.v1/section.go | 256 + vendor/gopkg.in/ini.v1/struct.go | 747 + vendor/gopkg.in/yaml.v2/.travis.yml | 16 - vendor/gopkg.in/yaml.v2/encode.go | 390 - vendor/gopkg.in/yaml.v2/writerc.go | 26 - .../LICENSE.libyaml => yaml.v3/LICENSE} | 39 +- vendor/gopkg.in/{yaml.v2 => yaml.v3}/NOTICE | 0 .../gopkg.in/{yaml.v2 => yaml.v3}/README.md | 31 +- vendor/gopkg.in/{yaml.v2 => yaml.v3}/apic.go | 56 +- .../gopkg.in/{yaml.v2 => yaml.v3}/decode.go | 629 +- .../gopkg.in/{yaml.v2 => yaml.v3}/emitterc.go | 413 +- vendor/gopkg.in/yaml.v3/encode.go | 577 + .../gopkg.in/{yaml.v2 => yaml.v3}/parserc.go | 165 +- .../gopkg.in/{yaml.v2 => yaml.v3}/readerc.go | 24 +- .../gopkg.in/{yaml.v2 => yaml.v3}/resolve.go | 138 +- .../gopkg.in/{yaml.v2 => yaml.v3}/scannerc.go | 363 +- .../gopkg.in/{yaml.v2 => yaml.v3}/sorter.go | 25 +- vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/{yaml.v2 => yaml.v3}/yaml.go | 
340 +- vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlh.go | 80 +- .../{yaml.v2 => yaml.v3}/yamlprivateh.go | 37 +- vendor/modules.txt | 686 +- 2269 files changed, 489179 insertions(+), 158153 deletions(-) create mode 100644 vendor/cloud.google.com/go/.gitignore create mode 100644 vendor/cloud.google.com/go/.release-please-manifest-individual.json create mode 100644 vendor/cloud.google.com/go/.release-please-manifest-submodules.json create mode 100644 vendor/cloud.google.com/go/.release-please-manifest.json create mode 100644 vendor/cloud.google.com/go/CHANGES.md rename vendor/{google.golang.org/appengine/CONTRIBUTING.md => cloud.google.com/go/CODE_OF_CONDUCT.md} (53%) create mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md create mode 100644 vendor/cloud.google.com/go/README.md create mode 100644 vendor/cloud.google.com/go/RELEASING.md create mode 100644 vendor/cloud.google.com/go/SECURITY.md create mode 100644 vendor/cloud.google.com/go/auth/CHANGES.md rename vendor/{google.golang.org/appengine => cloud.google.com/go/auth}/LICENSE (100%) create mode 100644 vendor/cloud.google.com/go/auth/README.md create mode 100644 vendor/cloud.google.com/go/auth/auth.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/compute.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/detect.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/doc.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/filetypes.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go create mode 100644 vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go create mode 100644 vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go create mode 100644 vendor/cloud.google.com/go/auth/grpctransport/directpath.go create mode 100644 vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go create mode 100644 vendor/cloud.google.com/go/auth/grpctransport/pool.go create mode 100644 vendor/cloud.google.com/go/auth/httptransport/httptransport.go create mode 100644 vendor/cloud.google.com/go/auth/httptransport/trace.go create mode 100644 vendor/cloud.google.com/go/auth/httptransport/transport.go create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/compute.go create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go create mode 100644 
vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go create mode 100644 vendor/cloud.google.com/go/auth/internal/credsfile/parse.go create mode 100644 vendor/cloud.google.com/go/auth/internal/internal.go create mode 100644 vendor/cloud.google.com/go/auth/internal/jwt/jwt.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cba.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/s2a.go create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/transport.go create mode 100644 vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md rename vendor/{github.com/aws/aws-sdk-go/LICENSE.txt => cloud.google.com/go/auth/oauth2adapt/LICENSE} (100%) create mode 100644 vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go create mode 100644 vendor/cloud.google.com/go/auth/threelegged.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md rename vendor/{github.com/Azure/azure-sdk-for-go => cloud.google.com/go/compute/metadata}/LICENSE (99%) create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md create mode 100644 vendor/cloud.google.com/go/compute/metadata/retry.go rename vendor/{google.golang.org/api/option/credentials_notgo19.go => cloud.google.com/go/compute/metadata/retry_linux.go} (60%) create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck.go rename vendor/{google.golang.org/api/option/credentials_go19.go => cloud.google.com/go/compute/metadata/syscheck_linux.go} (57%) create mode 100644 vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go create mode 100644 vendor/cloud.google.com/go/debug.md create mode 100644 vendor/cloud.google.com/go/doc.go create mode 100644 vendor/cloud.google.com/go/go.work create mode 100644 vendor/cloud.google.com/go/go.work.sum create mode 100644 vendor/cloud.google.com/go/iam/CHANGES.md create mode 100644 vendor/cloud.google.com/go/iam/LICENSE create mode 100644 vendor/cloud.google.com/go/iam/README.md create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go create mode 100644 vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go create mode 100644 vendor/cloud.google.com/go/iam/iam.go create mode 100644 vendor/cloud.google.com/go/internal/.repo-metadata-full.json create mode 100644 vendor/cloud.google.com/go/internal/README.md create mode 100644 vendor/cloud.google.com/go/internal/annotate.go create mode 100644 vendor/cloud.google.com/go/internal/gen_info.sh create mode 100644 vendor/cloud.google.com/go/internal/optional/optional.go create mode 100644 vendor/cloud.google.com/go/internal/retry.go create mode 100644 vendor/cloud.google.com/go/internal/trace/trace.go rename vendor/{google.golang.org/grpc/internal/binarylog/regenerate.sh => cloud.google.com/go/internal/version/update_version.sh} (50%) create mode 
100644 vendor/cloud.google.com/go/internal/version/version.go create mode 100644 vendor/cloud.google.com/go/kms/LICENSE create mode 100644 vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/autokey_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/auxiliary.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/ekm_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/kms/apiv1/iam.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/key_management_client.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go create mode 100644 vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go rename vendor/{google.golang.org/api/transport/http/dial_appengine.go => cloud.google.com/go/kms/apiv1/version.go} (69%) create mode 100644 vendor/cloud.google.com/go/kms/internal/version.go create mode 100644 vendor/cloud.google.com/go/longrunning/CHANGES.md create mode 100644 vendor/cloud.google.com/go/longrunning/LICENSE create mode 100644 vendor/cloud.google.com/go/longrunning/README.md create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/doc.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/from_conn.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/info.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go create mode 100644 vendor/cloud.google.com/go/longrunning/autogen/operations_client.go create mode 100644 vendor/cloud.google.com/go/longrunning/longrunning.go create mode 100644 vendor/cloud.google.com/go/longrunning/tidyfix.go create mode 100644 vendor/cloud.google.com/go/migration.md create mode 100644 vendor/cloud.google.com/go/release-please-config-individual.json create mode 100644 vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json create mode 100644 vendor/cloud.google.com/go/release-please-config.json create mode 100644 vendor/cloud.google.com/go/storage/CHANGES.md create mode 100644 vendor/cloud.google.com/go/storage/LICENSE create mode 100644 vendor/cloud.google.com/go/storage/README.md create mode 100644 vendor/cloud.google.com/go/storage/acl.go create mode 100644 vendor/cloud.google.com/go/storage/bucket.go create mode 100644 vendor/cloud.google.com/go/storage/client.go create mode 100644 vendor/cloud.google.com/go/storage/copy.go create mode 100644 vendor/cloud.google.com/go/storage/doc.go create mode 100644 vendor/cloud.google.com/go/storage/emulator_test.sh create mode 100644 vendor/cloud.google.com/go/storage/grpc_client.go create mode 100644 vendor/cloud.google.com/go/storage/hmac.go create mode 100644 vendor/cloud.google.com/go/storage/http_client.go create mode 100644 vendor/cloud.google.com/go/storage/iam.go create mode 100644 
vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/version.go create mode 100644 vendor/cloud.google.com/go/storage/internal/version.go create mode 100644 vendor/cloud.google.com/go/storage/invoke.go create mode 100644 vendor/cloud.google.com/go/storage/notifications.go create mode 100644 vendor/cloud.google.com/go/storage/option.go create mode 100644 vendor/cloud.google.com/go/storage/post_policy_v4.go create mode 100644 vendor/cloud.google.com/go/storage/reader.go create mode 100644 vendor/cloud.google.com/go/storage/storage.go create mode 100644 vendor/cloud.google.com/go/storage/storage.replay create mode 100644 vendor/cloud.google.com/go/storage/writer.go create mode 100644 vendor/cloud.google.com/go/testing.md create mode 100644 vendor/filippo.io/age/.gitattributes create mode 100644 vendor/filippo.io/age/AUTHORS create mode 100644 vendor/filippo.io/age/LICENSE create mode 100644 vendor/filippo.io/age/README.md create mode 100644 vendor/filippo.io/age/age.go create mode 100644 vendor/filippo.io/age/armor/armor.go create mode 100644 vendor/filippo.io/age/internal/bech32/bech32.go create mode 100644 vendor/filippo.io/age/internal/format/format.go create mode 100644 vendor/filippo.io/age/internal/stream/stream.go create mode 100644 vendor/filippo.io/age/parse.go create mode 100644 vendor/filippo.io/age/primitives.go create mode 100644 vendor/filippo.io/age/scrypt.go create mode 100644 vendor/filippo.io/age/x25519.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.keyvault.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go 
delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/convert.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go create mode 100644 
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go create mode 100644 
vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go rename vendor/{golang.org/x/oauth2 => github.com/ProtonMail/go-crypto}/AUTHORS (74%) rename vendor/{golang.org/x/oauth2 => github.com/ProtonMail/go-crypto}/CONTRIBUTORS (70%) rename vendor/github.com/{aws/aws-sdk-go/internal/sync/singleflight => ProtonMail/go-crypto}/LICENSE (100%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/PATENTS create mode 100644 vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/eax.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/armor/armor.go (66%) rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/armor/encode.go (61%) rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/canonical_text.go (71%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/elgamal/elgamal.go (88%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go create mode 100644 
vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/compressed.go (72%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/literal.go (94%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/ocfb.go (92%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/opaque.go (90%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go create mode 100644 
vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go rename vendor/{golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go => github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go} (60%) rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/userattribute.go (89%) rename vendor/{golang.org/x/crypto => github.com/ProtonMail/go-crypto}/openpgp/packet/userid.go (96%) create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/read.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/write.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go create mode 100644 
vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go create mode 100644 
vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go delete 
mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go delete 
mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go 
delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/kms/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go create mode 100644 vendor/github.com/blang/semver/.travis.yml create mode 100644 vendor/github.com/blang/semver/LICENSE create mode 100644 vendor/github.com/blang/semver/README.md create mode 100644 vendor/github.com/blang/semver/json.go create mode 100644 vendor/github.com/blang/semver/package.json create mode 100644 vendor/github.com/blang/semver/range.go create mode 100644 vendor/github.com/blang/semver/semver.go create mode 100644 vendor/github.com/blang/semver/sort.go create mode 100644 vendor/github.com/blang/semver/sql.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore rename vendor/{google.golang.org/api/googleapi/internal/uritemplates => github.com/cenkalti/backoff/v4}/LICENSE (95%) create mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md create mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go create mode 100644 vendor/github.com/cloudflare/circl/LICENSE create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/doc.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/key.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x25519/table.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/curve.go create mode 100644 
vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/doc.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/key.go create mode 100644 vendor/github.com/cloudflare/circl/dh/x448/table.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go create mode 100644 vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go create mode 100644 vendor/github.com/cloudflare/circl/internal/conv/conv.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/doc.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/hashes.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/rc.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/sha3.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/shake.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/xor.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go create mode 100644 vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp_generic.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go create mode 100644 vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go create mode 100644 vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go create mode 100644 vendor/github.com/cloudflare/circl/math/mlsbset/power.go create mode 100644 vendor/github.com/cloudflare/circl/math/primes.go create mode 100644 vendor/github.com/cloudflare/circl/math/wnaf.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go create mode 100644 
vendor/github.com/cloudflare/circl/sign/ed25519/modular.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/mult.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/point.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed25519/tables.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed448/ed448.go create mode 100644 vendor/github.com/cloudflare/circl/sign/ed448/signapi.go create mode 100644 vendor/github.com/cloudflare/circl/sign/sign.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go delete mode 100644 vendor/github.com/dimchansky/utfbom/.gitignore delete mode 100644 vendor/github.com/dimchansky/utfbom/.travis.yml delete mode 100644 vendor/github.com/dimchansky/utfbom/README.md delete mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go delete mode 100644 vendor/github.com/fatih/color/.travis.yml delete mode 100644 vendor/github.com/fatih/color/Gopkg.lock delete mode 100644 vendor/github.com/fatih/color/Gopkg.toml create mode 100644 vendor/github.com/fatih/color/color_windows.go create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile create mode 100644 vendor/github.com/felixge/httpsnoop/README.md create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go rename vendor/{gopkg.in/yaml.v2 => github.com/getsops/gopgagent}/LICENSE (99%) rename vendor/{go.mozilla.org => github.com/getsops}/gopgagent/gpgagent.go (100%) create mode 100644 vendor/github.com/getsops/sops/v3/.gitignore create mode 100644 vendor/github.com/getsops/sops/v3/.goreleaser.yaml create mode 100644 vendor/github.com/getsops/sops/v3/.sops.yaml create mode 100644 vendor/github.com/getsops/sops/v3/CHANGELOG.rst create mode 100644 vendor/github.com/getsops/sops/v3/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/getsops/sops/v3/CONTRIBUTING.md create mode 100644 vendor/github.com/getsops/sops/v3/DCO rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/LICENSE (100%) create mode 100644 vendor/github.com/getsops/sops/v3/Makefile create mode 100644 vendor/github.com/getsops/sops/v3/README.rst rename 
vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/aes/cipher.go (96%) create mode 100644 vendor/github.com/getsops/sops/v3/age/keysource.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/audit/audit.go (97%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/audit/schema.sql (100%) create mode 100644 vendor/github.com/getsops/sops/v3/azkv/keysource.go create mode 100644 vendor/github.com/getsops/sops/v3/cmd/sops/codes/codes.go create mode 100644 vendor/github.com/getsops/sops/v3/cmd/sops/common/common.go create mode 100644 vendor/github.com/getsops/sops/v3/cmd/sops/formats/formats.go create mode 100644 vendor/github.com/getsops/sops/v3/config/config.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/decrypt/decrypt.go (59%) create mode 100644 vendor/github.com/getsops/sops/v3/example.ini create mode 100644 vendor/github.com/getsops/sops/v3/example.json create mode 100644 vendor/github.com/getsops/sops/v3/example.txt create mode 100644 vendor/github.com/getsops/sops/v3/example.yaml create mode 100644 vendor/github.com/getsops/sops/v3/gcpkms/keysource.go create mode 100644 vendor/github.com/getsops/sops/v3/hcvault/keysource.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/keys/keys.go (93%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/keyservice/client.go (77%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/keyservice/keyservice.go (68%) create mode 100644 vendor/github.com/getsops/sops/v3/keyservice/keyservice.pb.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/keyservice/keyservice.proto (79%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/keyservice/server.go (70%) create mode 100644 vendor/github.com/getsops/sops/v3/kms/keysource.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/logging/logging.go (100%) create mode 100644 vendor/github.com/getsops/sops/v3/pgp/keysource.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/pgp/sops_functional_tests_key.asc (100%) create mode 100644 vendor/github.com/getsops/sops/v3/publish/gcs.go create mode 100644 vendor/github.com/getsops/sops/v3/publish/publish.go create mode 100644 vendor/github.com/getsops/sops/v3/publish/s3.go create mode 100644 vendor/github.com/getsops/sops/v3/publish/vault.go create mode 100644 vendor/github.com/getsops/sops/v3/rust-toolchain.toml rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/shamir/LICENSE (100%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/shamir/README.md (99%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/shamir/shamir.go (99%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/shamir/tables.go (100%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/sops.go (61%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/stores/dotenv/store.go (71%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/stores/flatten.go (66%) create mode 100644 vendor/github.com/getsops/sops/v3/stores/ini/store.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/stores/json/store.go (78%) rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/stores/stores.go (72%) create mode 100644 vendor/github.com/getsops/sops/v3/stores/yaml/store.go rename vendor/{go.mozilla.org/sops => github.com/getsops/sops/v3}/usererrors.go (96%) create mode 100644 vendor/github.com/getsops/sops/v3/version/version.go 
create mode 100644 vendor/github.com/go-jose/go-jose/v4/.gitignore create mode 100644 vendor/github.com/go-jose/go-jose/v4/.golangci.yml create mode 100644 vendor/github.com/go-jose/go-jose/v4/.travis.yml create mode 100644 vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md create mode 100644 vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md create mode 100644 vendor/github.com/go-jose/go-jose/v4/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v4/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v4/SECURITY.md create mode 100644 vendor/github.com/go-jose/go-jose/v4/asymmetric.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/crypter.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/encoding.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/LICENSE create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/README.md create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/decode.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/encode.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/indent.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/scanner.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/stream.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/json/tags.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwe.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwk.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jws.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/builder.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/claims.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/doc.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/errors.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/jwt.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/jwt/validation.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/opaque.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/shared.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/signing.go create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric.go create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md rename vendor/github.com/{dimchansky/utfbom => go-logr/logr}/LICENSE (100%) create mode 100644 vendor/github.com/go-logr/logr/README.md create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md create mode 100644 vendor/github.com/go-logr/logr/context.go create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go create mode 100644 vendor/github.com/go-logr/logr/context_slog.go create mode 100644 vendor/github.com/go-logr/logr/discard.go create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go create mode 100644 vendor/github.com/go-logr/logr/funcr/slogsink.go create mode 100644 vendor/github.com/go-logr/logr/logr.go create mode 100644 vendor/github.com/go-logr/logr/sloghandler.go create mode 100644 vendor/github.com/go-logr/logr/slogr.go create mode 100644 
vendor/github.com/go-logr/logr/slogsink.go rename vendor/github.com/{Azure/go-autorest/autorest/adal => go-logr/stdr}/LICENSE (94%) create mode 100644 vendor/github.com/go-logr/stdr/README.md create mode 100644 vendor/github.com/go-logr/stdr/stdr.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/.gitignore (68%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/LICENSE (96%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/README.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/SECURITY.md rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/VERSION_HISTORY.md (81%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/claims.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/doc.go (100%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/ecdsa.go (68%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/ecdsa_utils.go (75%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/hmac.go (53%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/map_claims.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/none.go (68%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser_option.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/registered_claims.go rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/rsa.go (77%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/rsa_pss.go (54%) rename vendor/github.com/{dgrijalva/jwt-go => golang-jwt/jwt/v5}/rsa_utils.go (68%) create mode 100644 vendor/github.com/golang-jwt/jwt/v5/signing_method.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token_option.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/types.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/validator.go delete mode 100644 vendor/github.com/golang/glog/README create mode 100644 vendor/github.com/golang/glog/README.md create mode 100644 vendor/github.com/golang/glog/glog_file_linux.go create mode 100644 vendor/github.com/golang/glog/glog_file_nonwindows.go create mode 100644 vendor/github.com/golang/glog/glog_file_other.go create mode 100644 vendor/github.com/golang/glog/glog_file_posix.go create mode 100644 vendor/github.com/golang/glog/glog_file_windows.go create mode 100644 vendor/github.com/golang/glog/glog_flags.go create mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink.go create mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go create mode 100644 vendor/github.com/golang/glog/internal/stackdump/stackdump.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 
vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/google/s2a-go/.gitignore create mode 100644 vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md create mode 100644 
vendor/github.com/google/s2a-go/CONTRIBUTING.md create mode 100644 vendor/github.com/google/s2a-go/LICENSE.md create mode 100644 vendor/github.com/google/s2a-go/README.md create mode 100644 vendor/github.com/google/s2a-go/fallback/s2a_fallback.go create mode 100644 vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go create mode 100644 vendor/github.com/google/s2a-go/internal/handshaker/service/service.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/record.go create mode 100644 vendor/github.com/google/s2a-go/internal/record/ticketsender.go create mode 100644 vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/README.md create mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/s2av2.go create mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go create mode 100644 vendor/github.com/google/s2a-go/retry/retry.go create mode 100644 vendor/github.com/google/s2a-go/s2a.go create mode 100644 vendor/github.com/google/s2a-go/s2a_options.go create mode 100644 vendor/github.com/google/s2a-go/s2a_utils.go create mode 100644 vendor/github.com/google/s2a-go/stream/s2a_stream.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go create mode 100644 vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json create mode 100644 vendor/github.com/googleapis/gax-go/v2/CHANGES.md create mode 100644 
vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto create mode 100644 vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/content_type.go rename vendor/github.com/{golang/protobuf/ptypes/doc.go => googleapis/gax-go/v2/internal/version.go} (85%) create mode 100644 vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/release-please-config.json create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.go-version create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go create mode 100644 
vendor/github.com/hashicorp/go-rootcerts/rootcerts.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/.gitignore create mode 100644 vendor/github.com/hashicorp/go-sockaddr/GNUmakefile create mode 100644 vendor/github.com/hashicorp/go-sockaddr/LICENSE create mode 100644 vendor/github.com/hashicorp/go-sockaddr/README.md create mode 100644 vendor/github.com/hashicorp/go-sockaddr/doc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifattr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/rfc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_aix.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_android.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_default.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_test_windows.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/unixsock.go create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/Makefile create mode 100644 vendor/github.com/hashicorp/hcl/README.md create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 
vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/lex.go create mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/hashicorp/vault/api/.copywrite.hcl create mode 100644 vendor/github.com/hashicorp/vault/api/LICENSE create mode 100644 vendor/github.com/hashicorp/vault/api/README.md create mode 100644 vendor/github.com/hashicorp/vault/api/auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/auth_token.go create mode 100644 vendor/github.com/hashicorp/vault/api/client.go create mode 100644 vendor/github.com/hashicorp/vault/api/help.go create mode 100644 vendor/github.com/hashicorp/vault/api/kv.go create mode 100644 vendor/github.com/hashicorp/vault/api/kv_v1.go create mode 100644 vendor/github.com/hashicorp/vault/api/kv_v2.go create mode 100644 vendor/github.com/hashicorp/vault/api/lifetime_watcher.go create mode 100644 vendor/github.com/hashicorp/vault/api/logical.go create mode 100644 vendor/github.com/hashicorp/vault/api/output_policy.go create mode 100644 vendor/github.com/hashicorp/vault/api/output_string.go create mode 100644 vendor/github.com/hashicorp/vault/api/plugin_helpers.go create mode 100644 vendor/github.com/hashicorp/vault/api/plugin_runtime_types.go create mode 100644 vendor/github.com/hashicorp/vault/api/plugin_types.go create mode 100644 vendor/github.com/hashicorp/vault/api/pluginruntimetype_enumer.go create mode 100644 vendor/github.com/hashicorp/vault/api/renewbehavior_enumer.go create mode 100644 vendor/github.com/hashicorp/vault/api/replication_status.go create mode 100644 vendor/github.com/hashicorp/vault/api/request.go create mode 100644 vendor/github.com/hashicorp/vault/api/response.go create mode 100644 vendor/github.com/hashicorp/vault/api/secret.go create mode 100644 vendor/github.com/hashicorp/vault/api/ssh.go create mode 100644 vendor/github.com/hashicorp/vault/api/ssh_agent.go create mode 100644 vendor/github.com/hashicorp/vault/api/sudo_paths.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_audit.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_capabilities.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_config_cors.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_generate_root.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_hastatus.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_health.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_init.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leader.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leases.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mfa.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_monitor.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mounts.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_plugins.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_plugins_runtimes.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_policy.go create mode 100644 
vendor/github.com/hashicorp/vault/api/sys_raft.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rekey.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rotate.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_seal.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_stepdown.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_ui_custom_message.go delete mode 100644 vendor/github.com/howeyc/gopass/.travis.yml delete mode 100644 vendor/github.com/howeyc/gopass/LICENSE.txt delete mode 100644 vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE delete mode 100644 vendor/github.com/howeyc/gopass/README.md delete mode 100644 vendor/github.com/howeyc/gopass/pass.go delete mode 100644 vendor/github.com/howeyc/gopass/terminal.go delete mode 100644 vendor/github.com/howeyc/gopass/terminal_solaris.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore delete mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml delete mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile delete mode 100644 vendor/github.com/jmespath/go-jmespath/README.md delete mode 100644 vendor/github.com/jmespath/go-jmespath/api.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/kylelemons/godebug/LICENSE create mode 100644 vendor/github.com/kylelemons/godebug/diff/diff.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/.gitignore create mode 100644 vendor/github.com/kylelemons/godebug/pretty/doc.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/public.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/reflect.go create mode 100644 vendor/github.com/kylelemons/godebug/pretty/structure.go delete mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml rename vendor/{go.mozilla.org/sops/test.sh => github.com/mattn/go-colorable/go.test.sh} (75%) delete mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/pkg/browser/LICENSE create mode 100644 vendor/github.com/pkg/browser/README.md create mode 100644 vendor/github.com/pkg/browser/browser.go create mode 100644 vendor/github.com/pkg/browser/browser_darwin.go create mode 100644 vendor/github.com/pkg/browser/browser_freebsd.go create mode 100644 vendor/github.com/pkg/browser/browser_linux.go create mode 100644 vendor/github.com/pkg/browser/browser_netbsd.go create mode 100644 vendor/github.com/pkg/browser/browser_openbsd.go create mode 100644 
vendor/github.com/pkg/browser/browser_unsupported.go create mode 100644 vendor/github.com/pkg/browser/browser_windows.go create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go create mode 100644 vendor/github.com/ryanuber/go-glob/.travis.yml create mode 100644 vendor/github.com/ryanuber/go-glob/LICENSE create mode 100644 vendor/github.com/ryanuber/go-glob/README.md create mode 100644 vendor/github.com/ryanuber/go-glob/glob.go create mode 100644 vendor/github.com/urfave/cli/.flake8 create mode 100644 vendor/github.com/urfave/cli/.gitignore create mode 100644 vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/urfave/cli/docs.go create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/fish.go create mode 100644 vendor/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/urfave/cli/flag_bool.go create mode 100644 vendor/github.com/urfave/cli/flag_bool_t.go create mode 100644 vendor/github.com/urfave/cli/flag_duration.go create mode 100644 vendor/github.com/urfave/cli/flag_float64.go create mode 100644 vendor/github.com/urfave/cli/flag_generic.go create mode 100644 vendor/github.com/urfave/cli/flag_int.go create mode 100644 vendor/github.com/urfave/cli/flag_int64.go create mode 100644 vendor/github.com/urfave/cli/flag_int64_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_int_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_string.go create mode 100644 vendor/github.com/urfave/cli/flag_string_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_uint.go create mode 100644 vendor/github.com/urfave/cli/flag_uint64.go create mode 100644 vendor/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/urfave/cli/parse.go create mode 100644 vendor/github.com/urfave/cli/sort.go create mode 100644 vendor/github.com/urfave/cli/template.go delete mode 100644 vendor/go.mozilla.org/sops/.gitignore delete mode 100644 vendor/go.mozilla.org/sops/.sops.yaml delete mode 100644 vendor/go.mozilla.org/sops/.travis.yml delete mode 100644 vendor/go.mozilla.org/sops/CHANGELOG.rst delete mode 100644 vendor/go.mozilla.org/sops/CODE_OF_CONDUCT.md delete mode 100644 
vendor/go.mozilla.org/sops/CONTRIBUTING.md delete mode 100644 vendor/go.mozilla.org/sops/Dockerfile delete mode 100644 vendor/go.mozilla.org/sops/Makefile delete mode 100644 vendor/go.mozilla.org/sops/README.rst delete mode 100644 vendor/go.mozilla.org/sops/azkv/keysource.go delete mode 100644 vendor/go.mozilla.org/sops/example.ini delete mode 100644 vendor/go.mozilla.org/sops/example.json delete mode 100644 vendor/go.mozilla.org/sops/example.txt delete mode 100644 vendor/go.mozilla.org/sops/example.yaml delete mode 100644 vendor/go.mozilla.org/sops/gcpkms/keysource.go delete mode 100644 vendor/go.mozilla.org/sops/keyservice/keyservice.pb.go delete mode 100644 vendor/go.mozilla.org/sops/kms/keysource.go delete mode 100644 vendor/go.mozilla.org/sops/make_download_page.sh delete mode 100644 vendor/go.mozilla.org/sops/pgp/keysource.go delete mode 100644 vendor/go.mozilla.org/sops/stores/yaml/store.go delete mode 100644 vendor/go.opencensus.io/.travis.yml delete mode 100644 vendor/go.opencensus.io/Gopkg.lock delete mode 100644 vendor/go.opencensus.io/Gopkg.toml create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go rename vendor/{github.com/Azure/azure-sdk-for-go/version/version.go => go.opencensus.io/plugin/ocgrpc/doc.go} (59%) create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go create mode 100644 vendor/go.opencensus.io/trace/trace_api.go rename vendor/{github.com/Azure/go-autorest/autorest/azure/auth => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc}/LICENSE (94%) create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go create mode 100644 
vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/otel/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/Makefile create mode 100644 vendor/go.opentelemetry.io/otel/README.md create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/README.md create mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go create mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/README.md create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/codes/README.md create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go create mode 100644 
vendor/go.opentelemetry.io/otel/codes/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go create mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh create mode 100644 vendor/go.opentelemetry.io/otel/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go create mode 100644 vendor/go.opentelemetry.io/otel/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/metric/README.md create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/README.md create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/README.md create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/README.md create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go create mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go create mode 100644 vendor/go.opentelemetry.io/otel/renovate.json create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go 
create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go create mode 100644 vendor/go.opentelemetry.io/otel/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/trace/README.md create mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/README.md create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.opentelemetry.io/otel/verify_readmes.sh create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh create mode 100644 vendor/go.opentelemetry.io/otel/version.go create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 
vendor/golang.org/x/crypto/chacha20/chacha_arm64.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s create mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go create mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go create mode 100644 vendor/golang.org/x/crypto/internal/alias/alias.go create mode 100644 vendor/golang.org/x/crypto/internal/alias/alias_purego.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create 
mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305_compat.go create mode 100644 vendor/golang.org/x/crypto/scrypt/scrypt.go create mode 100644 vendor/golang.org/x/crypto/sha3/doc.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_noasm.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.go create mode 100644 vendor/golang.org/x/crypto/sha3/keccakf_amd64.s create mode 100644 vendor/golang.org/x/crypto/sha3/sha3.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s create mode 100644 vendor/golang.org/x/crypto/sha3/shake.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake_noasm.go create mode 100644 vendor/golang.org/x/crypto/sha3/xor.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 vendor/golang.org/x/net/http2/testsync.go create mode 100644 vendor/golang.org/x/net/http2/timer.go create mode 100644 vendor/golang.org/x/oauth2/authhandler/authhandler.go create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go create mode 100644 vendor/golang.org/x/oauth2/google/error.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/aws.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/header.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go create mode 100644 vendor/golang.org/x/oauth2/pkce.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go rename vendor/golang.org/x/{oauth2/internal/client_appengine.go => sys/cpu/cpu_mipsx.go} (54%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go create mode 100644 vendor/golang.org/x/time/rate/sometimes.go delete mode 100644 vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json delete mode 100644 vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go create mode 100644 vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json create mode 100644 vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go create mode 100644 vendor/google.golang.org/api/internal/cba.go create mode 100644 
vendor/google.golang.org/api/internal/cert/default_cert.go create mode 100644 vendor/google.golang.org/api/internal/cert/enterprise_cert.go create mode 100644 vendor/google.golang.org/api/internal/cert/secureconnect_cert.go create mode 100644 vendor/google.golang.org/api/internal/conn_pool.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/error.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/retry.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/version.go create mode 100644 vendor/google.golang.org/api/internal/impersonate/impersonate.go delete mode 100644 vendor/google.golang.org/api/internal/pool.go create mode 100644 vendor/google.golang.org/api/internal/s2a.go delete mode 100644 vendor/google.golang.org/api/internal/service-account.json create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA rename vendor/google.golang.org/api/{googleapi/internal => internal/third_party}/uritemplates/uritemplates.go (98%) rename vendor/google.golang.org/api/{googleapi/internal => internal/third_party}/uritemplates/utils.go (100%) create mode 100644 vendor/google.golang.org/api/internal/version.go create mode 100644 vendor/google.golang.org/api/iterator/iterator.go create mode 100644 vendor/google.golang.org/api/option/internaloption/internaloption.go create mode 100644 vendor/google.golang.org/api/storage/v1/storage-api.json create mode 100644 vendor/google.golang.org/api/storage/v1/storage-gen.go create mode 100644 vendor/google.golang.org/api/transport/dial.go create mode 100644 vendor/google.golang.org/api/transport/doc.go create mode 100644 vendor/google.golang.org/api/transport/grpc/dial.go create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_socketopt.go create mode 100644 vendor/google.golang.org/api/transport/grpc/pool.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/README.md delete mode 100644 vendor/google.golang.org/appengine/appengine.go delete mode 100644 vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 vendor/google.golang.org/appengine/errors.go delete mode 100644 vendor/google.golang.org/appengine/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 
vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100644 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto delete mode 100644 vendor/google.golang.org/appengine/namespace.go delete mode 100644 vendor/google.golang.org/appengine/timeout.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh delete mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml rename vendor/{github.com/jmespath/go-jmespath/LICENSE => google.golang.org/grpc/NOTICE.txt} (93%) create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go delete mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/balancer_wrapper.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go delete mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/credentials/alts/alts.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/common.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go create mode 100644 vendor/google.golang.org/grpc/credentials/alts/utils.go create mode 100644 vendor/google.golang.org/grpc/credentials/google/google.go create mode 100644 vendor/google.golang.org/grpc/credentials/google/xds.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding_v2.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metricregistry.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metrics.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/grpclog.go create mode 100644 
vendor/google.golang.org/grpc/grpclog/internal/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/channel.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/channelmap.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/server.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/socket.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/subchannel.go rename vendor/google.golang.org/grpc/internal/channelz/{types_linux.go => syscall_linux.go} (83%) rename vendor/google.golang.org/grpc/internal/channelz/{types_nonlinux.go => syscall_nonlinux.go} (79%) create mode 100644 vendor/google.golang.org/grpc/internal/channelz/trace.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go rename vendor/google.golang.org/grpc/{credentials/internal => internal/credentials}/syscallconn.go (94%) create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/experimental.go create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go rename vendor/google.golang.org/grpc/internal/{channelz/util_nonlinux.go => googlecloud/manufacturer.go} (73%) rename vendor/google.golang.org/grpc/{credentials/tls13.go => internal/googlecloud/manufacturer_linux.go} (58%) create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go rename vendor/google.golang.org/grpc/internal/{channelz/util_linux.go => grpcsync/oncefunc.go} (59%) create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 
vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/passthrough/passthrough.go (78%) create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/stats/labels.go create mode 100644 vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go rename vendor/google.golang.org/grpc/{credentials/internal/syscallconn_appengine.go => internal/tcp_keepalive_others.go} (74%) create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go rename vendor/google.golang.org/grpc/{ => internal/transport}/proxy.go (70%) create mode 100644 vendor/google.golang.org/grpc/internal/xds/xds.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_slice.go create mode 100644 vendor/google.golang.org/grpc/mem/buffers.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go create mode 100644 vendor/google.golang.org/grpc/resolver/map.go delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go create mode 100644 vendor/google.golang.org/grpc/stream_interfaces.go create mode 100644 vendor/google.golang.org/grpc/trace_notrace.go create mode 100644 vendor/google.golang.org/grpc/trace_withtrace.go delete mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 
vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb create mode 100644 vendor/google.golang.org/protobuf/internal/editionssupport/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 
vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 
vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/protoadapt/convert.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 
100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go create mode 100644 vendor/gopkg.in/ini.v1/.editorconfig create mode 100644 vendor/gopkg.in/ini.v1/.gitignore create mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 vendor/gopkg.in/ini.v1/LICENSE create mode 100644 vendor/gopkg.in/ini.v1/Makefile create mode 100644 vendor/gopkg.in/ini.v1/README.md create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml create mode 100644 vendor/gopkg.in/ini.v1/data_source.go create mode 100644 vendor/gopkg.in/ini.v1/deprecated.go create mode 100644 vendor/gopkg.in/ini.v1/error.go create mode 100644 vendor/gopkg.in/ini.v1/file.go create mode 100644 vendor/gopkg.in/ini.v1/helper.go create mode 100644 vendor/gopkg.in/ini.v1/ini.go create mode 100644 vendor/gopkg.in/ini.v1/key.go create mode 100644 vendor/gopkg.in/ini.v1/parser.go create mode 100644 vendor/gopkg.in/ini.v1/section.go create mode 100644 vendor/gopkg.in/ini.v1/struct.go delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/encode.go delete mode 100644 vendor/gopkg.in/yaml.v2/writerc.go rename vendor/gopkg.in/{yaml.v2/LICENSE.libyaml => yaml.v3/LICENSE} (51%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/NOTICE (100%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/README.md (66%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/apic.go (93%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/decode.go (51%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/emitterc.go (80%) create mode 100644 vendor/gopkg.in/yaml.v3/encode.go rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/parserc.go (85%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/readerc.go (91%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/resolve.go (60%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/scannerc.go (87%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/sorter.go (76%) create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yaml.go (55%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlh.go (88%) rename vendor/gopkg.in/{yaml.v2 => yaml.v3}/yamlprivateh.go (78%) diff --git a/go.mod b/go.mod index 113ac6eca..445ba7d4b 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.28.1 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 + github.com/getsops/sops/v3 v3.9.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 @@ -23,23 +24,26 @@ require ( go.mozilla.org/mar v0.0.0-20200124173325-c51ce05c9f3d go.mozilla.org/mozlogrus v2.0.0+incompatible go.mozilla.org/pkcs7 v0.9.0 - go.mozilla.org/sops 
v0.0.0-20190912205235-14a22d7a7060 go.uber.org/mock v0.5.0 ) require ( - cloud.google.com/go v0.43.0 // indirect - github.com/Azure/azure-sdk-for-go v32.6.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.9.2 // indirect - github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/date v0.2.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect - github.com/Azure/go-autorest/logger v0.1.0 // indirect - github.com/Azure/go-autorest/tracing v0.5.0 // indirect - github.com/aws/aws-sdk-go v1.42.15 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.7 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/kms v1.20.0 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/storage v1.43.0 // indirect + filippo.io/age v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/ProtonMail/go-crypto v1.1.0-beta.0-proton // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.42 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect @@ -51,37 +55,72 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.36.3 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect github.com/aws/smithy-go v1.22.0 // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dimchansky/utfbom v1.1.0 // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - github.com/golang/protobuf v1.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cloudflare/circl v1.4.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect + github.com/fatih/color v1.17.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/getsops/gopgagent v0.0.0-20240527072608-0c14999532fe // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect + 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/goware/prefixer v0.0.0-20160118172347-395022866408 // indirect - github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/mattn/go-colorable v0.1.4 // indirect - github.com/mattn/go-isatty v0.0.10 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.7 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/vault/api v1.15.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect github.com/ugorji/go v0.0.0-20180112141927-9831f2c3ac10 // indirect - go.mozilla.org/gopgagent v0.0.0-20170926210634-4d7ea76ff71a // indirect - go.opencensus.io v0.22.1 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/api v0.11.0 // indirect - google.golang.org/appengine v1.6.1 // indirect - google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 // indirect - google.golang.org/grpc v1.24.0 // indirect - gopkg.in/yaml.v2 v2.2.8 // indirect + github.com/urfave/cli v1.22.15 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + google.golang.org/api v0.199.0 // indirect + google.golang.org/genproto v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f // indirect + 
google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 469d54af9..47c428741 100644 --- a/go.sum +++ b/go.sum @@ -1,64 +1,52 @@ +c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805 h1:u2qwJeEvnypw+OCPUHmoZE3IqwfuN5kgDfo5MLzpNM0= +c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805/go.mod h1:FomMrUJ2Lxt5jCLmZkG3FHa72zUprnhd3v/Z18Snm4w= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/azure-sdk-for-go v31.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v32.6.0+incompatible h1:PgaVceWF5idtJajyt1rzq1cep6eRPJ8+8hs4GnNzTo0= -github.com/Azure/azure-sdk-for-go v32.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= -github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= -github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= -github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= 
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.7 h1:ha65jNwOfI48YmUzNfMaUDfqt5ykuYIUnSartpU1+BA= +cloud.google.com/go/auth v0.9.7/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= +cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/age v1.2.0 h1:vRDp7pUMaAJzXNIWJVAZnEf/Dyi4Vu4wI8S1LBzufhE= +filippo.io/age v1.2.0/go.mod h1:JL9ew2lTN+Pyft4RiNGguFfOpewKwSHm5ayKD/A4004= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 
h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.1 h1:9fXQS/0TtQmKXp8SureKouF+idbQvp7cPUxykiohnBs= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.1/go.mod h1:f+OaoSg0VQYPMqB0Jp2D54j1VHzITYcJaCNwV+k00ts= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/DataDog/datadog-go v3.7.2+incompatible h1:o4QtYjBU/rG58VPh8Ne6F65YiMY5/v5q4WdY/HvRYMQ= github.com/DataDog/datadog-go v3.7.2+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/ProtonMail/go-crypto v1.1.0-beta.0-proton h1:ZGewsAoeSirbUS5cO8L0FMQA+iSop9xR1nmFYifDBPo= +github.com/ProtonMail/go-crypto v1.1.0-beta.0-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= -github.com/aws/aws-sdk-go v1.21.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.42.15 
h1:RcUChuF7KzrrTqx9LAzJbLBX00LkUY7cH9T1VdxNdqk= -github.com/aws/aws-sdk-go v1.42.15/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= @@ -87,6 +75,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlg github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/kms v1.36.3 h1:iHi6lC6LfW6SNvB2bixmlOW3WMyWFrHZCWX+P+CCxMk= +github.com/aws/aws-sdk-go-v2/service/kms v1.36.3/go.mod h1:OHmlX4+o0XIlJAQGAHPIy0N9yZcYS/vNG+T7geSNcFw= github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= @@ -97,188 +87,207 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyF github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.4.0 h1:BV7h5MgrktNzytKmWjpOtdYrf0lkkbF8YMlBGPhJQrY= +github.com/cloudflare/circl v1.4.0/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= +github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
+github.com/getsops/gopgagent v0.0.0-20240527072608-0c14999532fe h1:QKe/kmAYbndxwu91TcjHERsnMh5SgOB1x/qicvOdUJ8= +github.com/getsops/gopgagent v0.0.0-20240527072608-0c14999532fe/go.mod h1:awFzISqLJoZLm+i9QQ4SgMNHDqljH6jWV0B36V5MrUM= +github.com/getsops/sops/v3 v3.9.1 h1:wXsqzEsUPVQPcxsvjpwpqqD3DVRe9UZKJ7LSf5rzuLA= +github.com/getsops/sops/v3 v3.9.1/go.mod h1:k3XzAfcvMI1Rfyw2tky0/RakHrdbVcUrtCGTYsmkMY8= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context 
v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/goware/prefixer v0.0.0-20160118172347-395022866408 h1:Y9iQJfEqnN3/Nce9cOegemcy/9Ai5k3huT6E80F3zaw= github.com/goware/prefixer v0.0.0-20160118172347-395022866408/go.mod h1:PE1ycukgRPJ7bJ9a1fdfQ9j8i/cEcRAoLZzbxYpNB/s= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= 
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= -github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
+github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mozilla-services/yaml v0.0.0-20180922153656-28ffe5d0cafb/go.mod h1:Is/Ucts/yU/mWyGR8yELRoO46mejouKsJfQLAIfTR18= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 
h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/mozilla-services/yaml v0.0.0-20191106225358-5c216288813c h1:yE1NxRAZA3wF0laDWECtOe2J0tFjSHUI6MXXbMif+QY= github.com/mozilla-services/yaml v0.0.0-20191106225358-5c216288813c/go.mod h1:Is/Ucts/yU/mWyGR8yELRoO46mejouKsJfQLAIfTR18= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w= +github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA= +github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA= +github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/ugorji/go v0.0.0-20180112141927-9831f2c3ac10 
h1:4zp+5ElNBLy5qmaDFrbVDolQSOtPmquw+W6EMNEpi+k= github.com/ugorji/go v0.0.0-20180112141927-9831f2c3ac10/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM= +github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dmRIc0= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/youtube/vitess v2.1.1+incompatible h1:SE+P7DNX/jw5RHFs5CHRhZQjq402EJFCD33JhzQMdDw= github.com/youtube/vitess v2.1.1+incompatible/go.mod h1:hpMim5/30F1r+0P8GGtB29d0gWHr0IZ5unS+CG0zMx8= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.mozilla.org/cose v0.0.0-20200221144611-2ea72a6b3de3 h1:+06j/3Sl6VUyvzrDor4UrtxuCQQ67sUpUtKFm5KqYKU= go.mozilla.org/cose v0.0.0-20200221144611-2ea72a6b3de3/go.mod h1:NitxzJTubT7Y6B94irV0gYJeNT224l4AOv10qjgilLU= -go.mozilla.org/gopgagent v0.0.0-20170926210634-4d7ea76ff71a h1:N7VD+PwpJME2ZfQT8+ejxwA4Ow10IkGbU0MGf94ll8k= -go.mozilla.org/gopgagent v0.0.0-20170926210634-4d7ea76ff71a/go.mod h1:YDKUvO0b//78PaaEro6CAPH6NqohCmL2Cwju5XI2HoE= go.mozilla.org/hawk v0.0.0-20190327210923-a483e4a7047e h1:EHC+jNgDT61H9gWumkg+1bc5/+lYAhynV+GhgTrtUtc= go.mozilla.org/hawk v0.0.0-20190327210923-a483e4a7047e/go.mod h1:ios+sJANmPARsTCF4LHZNaCVQna3TvwS+ECs/Y2GONU= go.mozilla.org/mar v0.0.0-20200124173325-c51ce05c9f3d h1:ACkLHkvZ4aS7QBgPbE8qNl++J+5gRbJOOtqQeRznLmQ= @@ -287,170 +296,124 @@ go.mozilla.org/mozlogrus v2.0.0+incompatible h1:V8aAmJPN07RQuTJZfsroehGglIERIpbj go.mozilla.org/mozlogrus v2.0.0+incompatible/go.mod h1:bg4v22liQ+tLlQ6nI56e5C7Xe8AqEU4xDdEpWoCzQ6M= go.mozilla.org/pkcs7 v0.9.0 h1:yM4/HS9dYv7ri2biPtxt8ikvB37a980dg69/pKmS+eI= go.mozilla.org/pkcs7 v0.9.0/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.mozilla.org/sops v0.0.0-20190912205235-14a22d7a7060 h1:KnOZgR0z89tCwGympfPkpzXdlCmhksUP5ddQ+Iqo1FU= -go.mozilla.org/sops v0.0.0-20190912205235-14a22d7a7060/go.mod h1:StzH0aTTiss59GzhB1wiC9H5dqUqTTDZYfeurUrmJD8= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api 
v0.11.0 h1:n/qM3q0/rV2F0pox7o0CvNhlPvZAo7pLbef122cbLJ0= -google.golang.org/api v0.11.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20240930140551-af27646dc61f h1:mCJ6SGikSxVlt9scCayUl2dMq0msUgmBArqRY6umieI= +google.golang.org/genproto v0.0.0-20240930140551-af27646dc61f/go.mod h1:xtVODtPkMQRUZ4kqOTgp6JrXQrPevvfCSdk4mJtHUbM= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f h1:cUMEy+8oS78BWIH9OWazBkzbr090Od9tWBNtZHkOhf0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= 
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/main.go b/main.go index 62870b34a..9fa4128f0 100644 --- a/main.go +++ b/main.go @@ -38,8 +38,8 @@ import ( "github.com/mozilla-services/autograph/signer/mar" "github.com/mozilla-services/autograph/signer/xpi" - "go.mozilla.org/sops" - "go.mozilla.org/sops/decrypt" + sops "github.com/getsops/sops/v3" + "github.com/getsops/sops/v3/decrypt" "github.com/DataDog/datadog-go/statsd" "github.com/mozilla-services/autograph/crypto11" diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore new file mode 100644 index 000000000..cc7e53b46 --- /dev/null +++ b/vendor/cloud.google.com/go/.gitignore @@ -0,0 +1,12 @@ +# Editors +.idea +.vscode +*.swp +.history + +# Test files +*.test +coverage.txt + +# Other +.DS_Store diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json new file mode 100644 index 000000000..841b54357 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -0,0 +1,18 @@ +{ + "ai": "0.8.2", + "aiplatform": "1.68.0", + "auth": "0.8.1", + "auth/oauth2adapt": "0.2.4", + "bigquery": "1.62.0", + "bigtable": "1.29.0", + "datastore": "1.17.1", + "errorreporting": "0.3.1", + "firestore": "1.16.0", + "logging": "1.11.0", + "profiler": "0.4.1", + "pubsub": "1.41.0", + "pubsublite": "1.8.2", + "spanner": "1.66.0", + "storage": "1.43.0", + "vertexai": "0.12.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json new file mode 100644 index 000000000..8ac2f38f9 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -0,0 +1,149 @@ +{ + "accessapproval": "1.7.12", + "accesscontextmanager": "1.8.12", + "advisorynotifications": "1.4.6", + "alloydb": "1.10.7", + "analytics": "0.24.0", + "apigateway": "1.6.12", + "apigeeconnect": "1.6.12", + "apigeeregistry": "0.8.10", + "apikeys": "1.1.12", + "appengine": "1.8.12", + "apphub": "0.1.6", + "apps": "0.4.7", + "area120": "0.8.12", + "artifactregistry": "1.14.14", + "asset": "1.19.6", + "assuredworkloads": "1.11.12", + "automl": "1.13.12", + "backupdr": "1.0.4", + "baremetalsolution": "1.2.11", + "batch": "1.9.3", + "beyondcorp": "1.0.11", + "billing": "1.18.10", + "binaryauthorization": "1.8.8", + "certificatemanager": "1.8.6", + "channel": "1.17.12", + "chat": "0.3.1", + "cloudbuild": "1.16.6", + "cloudcontrolspartner": "1.0.4", + "clouddms": "1.7.11", + 
"cloudprofiler": "0.3.6", + "cloudquotas": "1.0.4", + "cloudtasks": "1.12.13", + "commerce": "1.0.5", + "compute": "1.27.5", + "compute/metadata": "0.5.0", + "confidentialcomputing": "1.6.1", + "config": "1.0.5", + "contactcenterinsights": "1.13.7", + "container": "1.38.1", + "containeranalysis": "0.12.2", + "datacatalog": "1.21.1", + "dataflow": "0.9.12", + "dataform": "0.9.9", + "datafusion": "1.7.12", + "datalabeling": "0.8.12", + "dataplex": "1.18.3", + "dataproc": "2.5.4", + "dataqna": "0.8.12", + "datastream": "1.10.11", + "deploy": "1.21.1", + "developerconnect": "0.1.4", + "dialogflow": "1.56.0", + "discoveryengine": "1.12.0", + "dlp": "1.17.0", + "documentai": "1.32.0", + "domains": "0.9.12", + "edgecontainer": "1.2.6", + "edgenetwork": "1.1.3", + "essentialcontacts": "1.6.13", + "eventarc": "1.13.11", + "filestore": "1.8.8", + "functions": "1.17.0", + "gkebackup": "1.5.5", + "gkeconnect": "0.8.12", + "gkehub": "0.14.12", + "gkemulticloud": "1.2.5", + "grafeas": "0.3.10", + "gsuiteaddons": "1.6.12", + "iam": "1.1.13", + "iap": "1.9.11", + "identitytoolkit": "0.1.4", + "ids": "1.4.12", + "iot": "1.7.12", + "kms": "1.18.5", + "language": "1.13.1", + "lifesciences": "0.9.12", + "longrunning": "0.5.12", + "managedidentities": "1.6.12", + "managedkafka": "0.1.6", + "maps": "1.11.7", + "mediatranslation": "0.8.12", + "memcache": "1.10.12", + "metastore": "1.13.11", + "migrationcenter": "1.0.5", + "monitoring": "1.20.4", + "netapp": "1.2.1", + "networkconnectivity": "1.14.11", + "networkmanagement": "1.13.7", + "networksecurity": "0.9.12", + "networkservices": "0.1.6", + "notebooks": "1.11.10", + "optimization": "1.6.10", + "orchestration": "1.9.7", + "orgpolicy": "1.12.8", + "osconfig": "1.13.3", + "oslogin": "1.13.8", + "parallelstore": "0.5.1", + "phishingprotection": "0.8.12", + "policysimulator": "0.2.10", + "policytroubleshooter": "1.10.10", + "privatecatalog": "0.9.12", + "privilegedaccessmanager": "0.1.1", + "rapidmigrationassessment": "1.0.12", + "recaptchaenterprise": "2.14.3", + "recommendationengine": "0.8.12", + "recommender": "1.12.8", + "redis": "1.16.5", + "resourcemanager": "1.9.12", + "resourcesettings": "1.7.5", + "retail": "1.17.5", + "run": "1.4.1", + "scheduler": "1.10.13", + "secretmanager": "1.13.6", + "securesourcemanager": "1.1.1", + "security": "1.17.5", + "securitycenter": "1.34.0", + "securitycentermanagement": "1.0.4", + "securityposture": "0.1.8", + "servicecontrol": "1.13.7", + "servicedirectory": "1.11.12", + "servicehealth": "1.0.5", + "servicemanagement": "1.9.13", + "serviceusage": "1.8.11", + "shell": "1.7.12", + "shopping": "0.8.7", + "speech": "1.24.1", + "storageinsights": "1.0.12", + "storagetransfer": "1.10.11", + "streetview": "0.1.5", + "support": "1.0.11", + "talent": "1.6.13", + "telcoautomation": "1.0.4", + "texttospeech": "1.7.12", + "tpu": "1.6.12", + "trace": "1.10.12", + "translate": "1.11.0", + "video": "1.22.1", + "videointelligence": "1.11.12", + "vision": "2.8.7", + "visionai": "0.2.5", + "vmmigration": "1.7.12", + "vmwareengine": "1.2.1", + "vpcaccess": "1.7.12", + "webrisk": "1.9.12", + "websecurityscanner": "1.6.12", + "workflows": "1.12.11", + "workstations": "1.0.5" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json new file mode 100644 index 000000000..7b1015e63 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.115.1" +} diff --git a/vendor/cloud.google.com/go/CHANGES.md 
b/vendor/cloud.google.com/go/CHANGES.md new file mode 100644 index 000000000..d48e0cfba --- /dev/null +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -0,0 +1,2651 @@ +# Changes + +## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13) + + +### Bug Fixes + +* **cloud.google.com/go:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.115.0](https://github.com/googleapis/google-cloud-go/compare/v0.114.0...v0.115.0) (2024-06-12) + + +### Features + +* **internal/trace:** Deprecate OpenCensus support ([#10287](https://github.com/googleapis/google-cloud-go/issues/10287)) ([430ce8a](https://github.com/googleapis/google-cloud-go/commit/430ce8adea2d0be43461e2ca783b7c17794e983f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205) [#8655](https://github.com/googleapis/google-cloud-go/issues/8655) + + +### Bug Fixes + +* **internal/postprocessor:** Use approved image tag ([#10341](https://github.com/googleapis/google-cloud-go/issues/10341)) ([a388fe5](https://github.com/googleapis/google-cloud-go/commit/a388fe5cf075d0af986861c70dcb7b9f97c31019)) + +## [0.114.0](https://github.com/googleapis/google-cloud-go/compare/v0.113.0...v0.114.0) (2024-05-23) + + +### Features + +* **civil:** Add Compare method to Date, Time, and DateTime ([#10193](https://github.com/googleapis/google-cloud-go/issues/10193)) ([c2920d7](https://github.com/googleapis/google-cloud-go/commit/c2920d7c9007a11d9232c628fba5496197deeba4)) + + +### Bug Fixes + +* **internal/postprocessor:** Add scopes to all appropriate commit lines ([#10192](https://github.com/googleapis/google-cloud-go/issues/10192)) ([c21399b](https://github.com/googleapis/google-cloud-go/commit/c21399bdc362c6c646c2c0f8c2c55903898e0eab)) + +## [0.113.0](https://github.com/googleapis/google-cloud-go/compare/v0.112.2...v0.113.0) (2024-05-08) + + +### Features + +* **civil:** Add Compare method to Date, Time, and DateTime ([#10010](https://github.com/googleapis/google-cloud-go/issues/10010)) ([34455c1](https://github.com/googleapis/google-cloud-go/commit/34455c15d62b089f3281ff4c663245e72b257f37)) + + +### Bug Fixes + +* **all:** Bump x/net to v0.24.0 ([#10000](https://github.com/googleapis/google-cloud-go/issues/10000)) ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) +* **debugger:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90)) +* **internal/aliasfix:** Handle import paths correctly ([#10097](https://github.com/googleapis/google-cloud-go/issues/10097)) ([fafaf0d](https://github.com/googleapis/google-cloud-go/commit/fafaf0d0a293096559a4655ea61062cb896f1568)) +* **rpcreplay:** Properly unmarshal dynamic message ([#9774](https://github.com/googleapis/google-cloud-go/issues/9774)) ([53ccb20](https://github.com/googleapis/google-cloud-go/commit/53ccb20d925ccb00f861958d9658b55738097dc6)), refs [#9773](https://github.com/googleapis/google-cloud-go/issues/9773) + + +### Documentation + +* **testing:** Switch deprecated WithInsecure to WithTransportCredentials ([#10091](https://github.com/googleapis/google-cloud-go/issues/10091)) ([2b576ab](https://github.com/googleapis/google-cloud-go/commit/2b576abd1c3bfca2f962de0e024524f72d3652c0)) + +## [0.112.2](https://github.com/googleapis/google-cloud-go/compare/v0.112.1...v0.112.2) (2024-03-27) + + +### Bug 
Fixes + +* **all:** Release protobuf dep bump ([#9586](https://github.com/googleapis/google-cloud-go/issues/9586)) ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [0.112.1](https://github.com/googleapis/google-cloud-go/compare/v0.112.0...v0.112.1) (2024-02-26) + + +### Bug Fixes + +* **internal/postprocessor:** Handle googleapis link in commit body ([#9251](https://github.com/googleapis/google-cloud-go/issues/9251)) ([1dd3515](https://github.com/googleapis/google-cloud-go/commit/1dd35157bff871a2b3e5b0e3cac33502737fd631)) + + +### Documentation + +* **main:** Add OpenTelemetry-Go compatibility warning to debug.md ([#9268](https://github.com/googleapis/google-cloud-go/issues/9268)) ([18f9bb9](https://github.com/googleapis/google-cloud-go/commit/18f9bb94fbc239255a873b29462fc7c2eac3c0aa)), refs [#9267](https://github.com/googleapis/google-cloud-go/issues/9267) + +## [0.112.0](https://github.com/googleapis/google-cloud-go/compare/v0.111.0...v0.112.0) (2024-01-11) + + +### Features + +* **internal/trace:** Export internal/trace package constants and vars ([#9242](https://github.com/googleapis/google-cloud-go/issues/9242)) ([941c16f](https://github.com/googleapis/google-cloud-go/commit/941c16f3a2602e9bdc737b139060a7dd8318f9dd)) + + +### Documentation + +* **main:** Add telemetry discussion to debug.md ([#9074](https://github.com/googleapis/google-cloud-go/issues/9074)) ([90ed12e](https://github.com/googleapis/google-cloud-go/commit/90ed12e1dffe722b42f58556f0e17b808da9714d)), refs [#8655](https://github.com/googleapis/google-cloud-go/issues/8655) + +## [0.111.0](https://github.com/googleapis/google-cloud-go/compare/v0.110.10...v0.111.0) (2023-11-29) + + +### Features + +* **internal/trace:** Add OpenTelemetry support ([#8655](https://github.com/googleapis/google-cloud-go/issues/8655)) ([7a46b54](https://github.com/googleapis/google-cloud-go/commit/7a46b5428f239871993d66be2c7c667121f60a6f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205) + + +### Bug Fixes + +* **all:** Bump google.golang.org/api to v0.149.0 ([#8959](https://github.com/googleapis/google-cloud-go/issues/8959)) ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [0.110.10](https://github.com/googleapis/google-cloud-go/compare/v0.110.9...v0.110.10) (2023-10-31) + + +### Bug Fixes + +* **all:** Update grpc-go to v1.56.3 ([#8916](https://github.com/googleapis/google-cloud-go/issues/8916)) ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **all:** Update grpc-go to v1.59.0 ([#8922](https://github.com/googleapis/google-cloud-go/issues/8922)) ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) +* **internal/godocfx:** Fix links to other packages in summary ([#8756](https://github.com/googleapis/google-cloud-go/issues/8756)) ([6220a9a](https://github.com/googleapis/google-cloud-go/commit/6220a9afeb89df3080e9e663e97648939fd4e15f)) + +## [0.110.9](https://github.com/googleapis/google-cloud-go/compare/v0.110.8...v0.110.9) (2023-10-19) + + +### Bug Fixes + +* **all:** Update golang.org/x/net to v0.17.0 ([#8705](https://github.com/googleapis/google-cloud-go/issues/8705)) ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/aliasgen:** Update golang.org/x/net to v0.17.0 
([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/examples/fake:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/gapicgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/generated/snippets:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/godocfx:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/postprocessor:** Add ability to override release level ([#8643](https://github.com/googleapis/google-cloud-go/issues/8643)) ([26c608a](https://github.com/googleapis/google-cloud-go/commit/26c608a8204d740767dfebf6aa473cdf1873e5f0)) +* **internal/postprocessor:** Add missing assignment ([#8646](https://github.com/googleapis/google-cloud-go/issues/8646)) ([d8c5746](https://github.com/googleapis/google-cloud-go/commit/d8c5746e6dde1bd34c01a9886804f861c88c0cb7)) +* **internal/postprocessor:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [0.110.8](https://github.com/googleapis/google-cloud-go/compare/v0.110.7...v0.110.8) (2023-09-11) + + +### Documentation + +* **postprocessor:** Nudge users towards stable clients ([#8513](https://github.com/googleapis/google-cloud-go/issues/8513)) ([05a1484](https://github.com/googleapis/google-cloud-go/commit/05a1484b0752aaa3d6a164d37686d6de070cc78d)) + +## [0.110.7](https://github.com/googleapis/google-cloud-go/compare/v0.110.6...v0.110.7) (2023-07-31) + + +### Bug Fixes + +* **main:** Add more docs to base package ([c401ab4](https://github.com/googleapis/google-cloud-go/commit/c401ab4a576c64ab2b8840a90f7ccd5d031cea57)) + +## [0.110.6](https://github.com/googleapis/google-cloud-go/compare/v0.110.5...v0.110.6) (2023-07-13) + + +### Bug Fixes + +* **httpreplay:** Ignore GCS header by default ([#8260](https://github.com/googleapis/google-cloud-go/issues/8260)) ([b961a1a](https://github.com/googleapis/google-cloud-go/commit/b961a1abe7aeafe420c88eed38035fed0bbf7bbe)), refs [#8233](https://github.com/googleapis/google-cloud-go/issues/8233) + +## [0.110.5](https://github.com/googleapis/google-cloud-go/compare/v0.110.4...v0.110.5) (2023-07-07) + + +### Bug Fixes + +* **logadmin:** Use consistent filter in paging example ([#8221](https://github.com/googleapis/google-cloud-go/issues/8221)) ([9570159](https://github.com/googleapis/google-cloud-go/commit/95701597b1d709543ea22a4b6ff9b28b14a2d4fc)) + +## [0.110.4](https://github.com/googleapis/google-cloud-go/compare/v0.110.3...v0.110.4) (2023-07-05) + + +### Bug Fixes + +* **internal/retry:** Simplify gRPC status code mapping of retry error ([#8196](https://github.com/googleapis/google-cloud-go/issues/8196)) ([e8b224a](https://github.com/googleapis/google-cloud-go/commit/e8b224a3bcb0ca9430990ef6ae8ddb7b60f5225d)) + +## [0.110.3](https://github.com/googleapis/google-cloud-go/compare/v0.110.2...v0.110.3) (2023-06-23) + + +### Bug Fixes + +* **internal/retry:** Never return nil from GRPCStatus() ([#8128](https://github.com/googleapis/google-cloud-go/issues/8128)) 
([005d2df](https://github.com/googleapis/google-cloud-go/commit/005d2dfb6b68bf5a35bfb8db449d3f0084b34d6e)) + + +### Documentation + +* **v1:** Minor clarifications for TaskGroup and min_cpu_platform ([3382ef8](https://github.com/googleapis/google-cloud-go/commit/3382ef81b6bcefe1c7bfc14aa5ff9bbf25850966)) + +## [0.110.2](https://github.com/googleapis/google-cloud-go/compare/v0.110.1...v0.110.2) (2023-05-08) + + +### Bug Fixes + +* **deps:** Update grpc to v1.55.0 ([#7885](https://github.com/googleapis/google-cloud-go/issues/7885)) ([9fc48a9](https://github.com/googleapis/google-cloud-go/commit/9fc48a921428c94c725ea90415d55ff0c177dd81)) + +## [0.110.1](https://github.com/googleapis/google-cloud-go/compare/v0.110.0...v0.110.1) (2023-05-03) + + +### Bug Fixes + +* **httpreplay:** Add ignore-header flag, fix tests ([#7865](https://github.com/googleapis/google-cloud-go/issues/7865)) ([1829706](https://github.com/googleapis/google-cloud-go/commit/1829706c5ade36cc786b2e6780fda5e7302f965b)) + +## [0.110.0](https://github.com/googleapis/google-cloud-go/compare/v0.109.0...v0.110.0) (2023-02-15) + + +### Features + +* **internal/postprocessor:** Detect and initialize new modules ([#7288](https://github.com/googleapis/google-cloud-go/issues/7288)) ([59ce02c](https://github.com/googleapis/google-cloud-go/commit/59ce02c13f265741a8f1f0f7ad5109bf83e3df82)) +* **internal/postprocessor:** Only regen snippets for changed modules ([#7300](https://github.com/googleapis/google-cloud-go/issues/7300)) ([220f8a5](https://github.com/googleapis/google-cloud-go/commit/220f8a5ad2fd64b75c5a1af531b1ab4597cf17d7)) + + +### Bug Fixes + +* **internal/postprocessor:** Add scopes without OwlBot api-name feature ([#7404](https://github.com/googleapis/google-cloud-go/issues/7404)) ([f7fe4f6](https://github.com/googleapis/google-cloud-go/commit/f7fe4f68ebf2ca28efd282f3419329dd2c09d245)) +* **internal/postprocessor:** Include module and package in scope ([#7294](https://github.com/googleapis/google-cloud-go/issues/7294)) ([d2c5c84](https://github.com/googleapis/google-cloud-go/commit/d2c5c8449f6939301f0fd506282e8fc73fc84f96)) + +## [0.109.0](https://github.com/googleapis/google-cloud-go/compare/v0.108.0...v0.109.0) (2023-01-18) + + +### Features + +* **internal/postprocessor:** Make OwlBot postprocessor ([#7202](https://github.com/googleapis/google-cloud-go/issues/7202)) ([7a1022e](https://github.com/googleapis/google-cloud-go/commit/7a1022e215261d679c8496cdd35a9cad1f13e527)) + +## [0.108.0](https://github.com/googleapis/google-cloud-go/compare/v0.107.0...v0.108.0) (2023-01-05) + + +### Features + +* **all:** Enable REGAPIC and REST numeric enums ([#6999](https://github.com/googleapis/google-cloud-go/issues/6999)) ([28f3572](https://github.com/googleapis/google-cloud-go/commit/28f3572addb0f563a2a42a76977b4e083191613f)) +* **debugger:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + + +### Bug Fixes + +* **internal/gapicgen:** Disable rest for non-rest APIs ([#7157](https://github.com/googleapis/google-cloud-go/issues/7157)) ([ab332ce](https://github.com/googleapis/google-cloud-go/commit/ab332ced06f6c07909444e4528c02a8b6a0a70a6)) + +## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15) + + +### Features + +* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) 
([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b)) + +## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09) + + +### Features + +* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) + + +### Features + +* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) + +## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + +## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) + + +### Bug Fixes + +* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) + +## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) + + +### Features + +* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187)) + +### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03) + + +### Bug Fixes + +* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b)) + +## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20) + + +### Features + +* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682)) +* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095)) +* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b)) +* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672)) +* **internal/gapicgen:** move breaking change indicator if present 
([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8)) +* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059)) + +## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04) + + +### Features + +* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) +* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5)) +* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a)) + + +### Bug Fixes + +* **artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023)) +* **profiler:** refine regular expression for parsing backoff duration in E2E tests 
([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043)) +* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7)) + +## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06) + + +### Features + +* **dialogflow/cx:** added `TelephonyTransferCall` in response message ([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774)) + +## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03) + + +### Features + +* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) +* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) +* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) +* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. 
([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) +* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5)) +* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) +* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) +* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) +* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) +* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) +* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) +* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) +* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference 
([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) +* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) +* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) +* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) +* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) +* **domains:** added library for Cloud Domains v1 API. Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6)) +* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0)) +* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) +* **kms:** add support for Raw PKCS[#1](https://www.github.com/googleapis/google-cloud-go/issues/1) signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353)) +* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8)) +* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) +* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78)) +* **osconfig:** OSConfig: add OS policy assignment rpcs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) +* **osconfig:** Update OSConfig API 
([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a)) +* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d)) +* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8)) +* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266)) +* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding w/ external system metadata. External systems are a child resource under finding, and are housed on the finding itself, and can also be filtered on in Notifications, the ListFindings and GroupFindings API ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) +* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. 
Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) +* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03)) +* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) +* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e)) +* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59)) +* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occured feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5)) + + +### Bug Fixes + +* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) +* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58)) +* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062) +* **compute:** make parent_id fields required compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7)) +* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897)) +* **internal/gapicgen:** fix a compute filtering ([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99)) +* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47)) +* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) 
([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2)) +* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e)) +* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363)) +* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da)) +* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1)) +* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d)) + +## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29) + + +### Features + +* **internal** add Retry func to testutil from samples repository [#4902](https://github.com/googleapis/google-cloud-go/pull/4902) + +## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28) + + +### Features + +* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727) +* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2)) +* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a)) + + +### Bug Fixes + +* **internal/gapicgen:** restore fmting proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933)) +* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184)) + +## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21) + +### Bug Fixes + +* **internal/gapicgen:** add a temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da)) +* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa)) + +### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02) + + +### Bug Fixes + +* 
**compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713) + +## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31) + + +### Features + +* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b)) +* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082)) +* **asset:** Release of relationships in v1, Add content type Relationship to support relationship export Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) +* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) +* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) +* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) +* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. 
([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) +* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642) +* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e)) +* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510)) +* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) +* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6)) +* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) +* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) +* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c)) +* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) +* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b)) +* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId is ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3)) +* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) + + +### Bug Fixes + +* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123)) + +## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16) + + +### Features + +* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061) 
+* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294) +* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083)) + +## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11) + + +### Features + +* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6)) +* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) +* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3)) +* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) +* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) +* **dialogflow:** expose `Locations` service to get/list avaliable locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) +* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) +* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) +* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093)) + + +### Bug Fixes + +* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe)) + +## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03) + + +### ⚠ 
BREAKING CHANGES + +* **compute:** add pagination and an Operation wrapper (#4542) + +### Features + +* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e)) +* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a)) +* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437)) + + +### Bug Fixes + +* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352)) + +## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29) + + +### Features + +* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) +* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97)) +* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63)) +* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. 
([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3)) + + +### Bug Fixes + +* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a)) +* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434) + +## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22) + + +### ⚠ BREAKING CHANGES + +* **cloudbuild/apiv1:** Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported. + +### Features + +* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) +* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547)) +* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) +* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957)) + + +### Bug Fixes + +* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0)) +* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447) +* **kms:** Updating WORKSPACE files to use the newest version of the Typescript generator. 
([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c)) + +## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13) + + +### Features + +* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) +* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567)) +* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) +* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) +* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) + + +### Bug Fixes + +* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167)) +* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391)) + + +### Miscellaneous Chores + +* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15)) + +## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01) + + +### Features + +* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5)) + +## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30) + + +### Features + +* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **documentai:** update document.proto, add the processor management methods. 
([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) +* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00)) +* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5)) +* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6)) +* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) +* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) + + +### Bug Fixes + +* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260) +* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41)) +* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) + +## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09) + + +### Features + +* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **apigeeconnect:** start generating abiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b)) +* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2)) +* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) +* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. 
([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b)) +* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345)) +* **lifesciences:** strat generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b)) + + +### Bug Fixes + +* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a)) +* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) + +## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02) + + +### Features + +* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) +* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda)) +* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232)) +* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3)) +* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834)) +* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) +* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098)) +* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) +* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076)) + +## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17) + + +### Features + +* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) +* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. 
([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) +* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b)) +* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763)) +* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea)) +* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540)) +* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) +* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. 
([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c)) +* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4)) +* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c)) +* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481)) +* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38)) +* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808)) +* **metastore:** start generateing apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310)) +* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f)) +* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) +* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd)) +* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06)) +* **speech:** Support for spoken punctuation and spoken emojis. 
([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) + + +### Bug Fixes + +* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3)) +* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e)) +* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25)) +* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5)) +* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34)) +* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037) +* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab)) + + +### Miscellaneous Chores + +* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7)) + +## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) + + +### Features + +* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. 
([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) + + +### Bug Fixes + +* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) + +## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) + + +### ⚠ BREAKING CHANGES + +* **all:** This is a breaking change in dialogflow + +### Features + +* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634) +* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61)) +* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf)) +* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57)) +* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f)) +* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. 
([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee)) + + +### Miscellaneous Chores + +* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) + +## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10) + + +### Features + +* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f)) +* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441)) +* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25)) +* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e)) +* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548)) +* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650)) +* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4)) +* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) + + +### Bug Fixes + +* **analytics/admin:** add 
`https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) +* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495)) +* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80)) +* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef)) +* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c)) +* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e)) + +## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22) + + +### Features + +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de)) +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + + +### Bug Fixes + +* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. 
([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + +## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16) + + +### Features + +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04)) +* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2)) +* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418)) +* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f)) +* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac)) + + +### Bug Fixes + +* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Update bazel builds for ER client libraries. 
([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd)) +* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363)) +* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) + +## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02) + + +### Features + +* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db)) +* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d)) +* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e)) +* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **appengine:** start generating apiv1 
([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7)) +* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3)) +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf)) +* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5)) +* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7)) +* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102)) +* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149)) +* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b)) +* **internal/gapicgen:** change commit formatting 
to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e)) +* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af)) +* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279)) +* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359)) +* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f)) +* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8)) +* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60)) +* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a)) +* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db)) + + +### Bug Fixes + +* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f)) +* **internal/godocfx:** add TOC element for module root package 
([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a)) +* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71)) + +## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448) +* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965)) + + +### Bug Fixes + +* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a)) + +## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374) +* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84)) +* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba)) + + +### Bug Fixes + +* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5)) + +## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) 
[#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199) +* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff)) +* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be)) +* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999)) +* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a)) + + +### Bug Fixes + +* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0)) + +## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) + + +### Bug Fixes + +* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) +* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) + + +## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044) +* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3)) +* **internal:** auto-run godocfx on new mods 
([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42)) +* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc)) +* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171)) +* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6)) +* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2)) +* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900)) + + +### Bug Fixes + +* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb)) +* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7)) + +## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025) +* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44)) +* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714)) + +## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14) + +This is an empty release that was created solely to aid in pubsublite's module +carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. 
+ +## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14) + + +### Features + +* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045)) +* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958) +* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33)) +* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf)) +* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b)) +* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e)) +* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85)) + + +### Bug Fixes + +* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0)) + +## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935) + +## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) 
[#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864) +* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568)) +* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72)) +* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830)) +* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e)) +* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61)) + + +### Bug Fixes + +* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d)) +* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7)) +* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883) + +## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781) +* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93)) +* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521)) +* **cloudbuild:** Start generating apiv1/v3 
([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64)) +* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5)) +* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893)) +* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4)) +* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901)) +* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580)) + + +### Bug Fixes + +* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639)) +* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c)) +* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835) + + +### Reverts + +* **cloudbuild): "feat(cloudbuild:** Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557)) + +## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27) + + +### Announcements + +The following changes will be included in an upcoming release and are not +included in this one. + +#### Default Deadlines + +By default, non-streaming methods, like Create or Get methods, will have a +default deadline applied to the context provided at call time, unless a context +deadline is already set. Streaming methods have no default deadline and will run +indefinitely, unless the context provided at call time contains a deadline. + +To opt-out of this behavior, set the environment variable +`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to +initializing a client. This opt-out mechanism will be removed in a later +release, with a notice similar to this one ahead of its removal. 
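For readers of this vendored changelog, a minimal sketch of the two deadline controls described in the notice above follows; it is illustrative only and not part of the upstream file. The environment variable name is quoted verbatim from the announcement, while the choice of the Cloud KMS client and the 30-second timeout are assumptions made purely for the example.

```go
// Minimal sketch, assuming a typical generated client (Cloud KMS is only an
// illustrative choice). It shows (1) opting out of the default per-call
// deadline via the environment variable named in the notice above, and
// (2) supplying an explicit context deadline, which takes precedence over
// any default.
package main

import (
	"context"
	"log"
	"os"
	"time"

	kms "cloud.google.com/go/kms/apiv1"
)

func main() {
	// (1) Opt out of default deadlines; must be set before the client is created.
	os.Setenv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE", "true")

	ctx := context.Background()
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatalf("kms.NewKeyManagementClient: %v", err)
	}
	defer client.Close()

	// (2) An explicit deadline on the call context overrides any default.
	callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	_ = callCtx // pass callCtx to non-streaming RPCs such as GetCryptoKey.
}
```

Per the announcement, a context deadline set by the caller (as in the second half of the sketch) is always honored, regardless of whether the opt-out variable is set.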
+ + +### Features + +* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764) + + +### Bug Fixes + +* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1)) +* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d)) + +## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706) +* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e)) + +## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05) + + +### Features + +* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04)) +* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c)) +* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b)) +* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601)) +* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32)) + +## v0.62.0 + +### Announcements + +- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was + merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606). + This fixed a broken API response for `DiagnoseCluster`. When polling on the + Long Running Operation(LRO), the API now returns + `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an + `error` before. + +### Changes + +- all: + - Updated all direct dependencies. + - Updated contributing guidelines to suggest allowing edits from maintainers. +- billing/budgets: + - Start generating client for apiv1beta1. +- functions: + - Start generating client for apiv1. +- notebooks: + - Start generating client apiv1beta1. 
+- profiler: + - update proftest to support parsing floating-point backoff durations. + - Fix the regexp used to parse backoff duration. +- Various updates to autogenerated clients. + +## v0.61.0 + +### Changes + +- all: + - Update all direct dependencies. +- dashboard: + - Start generating client for apiv1. +- policytroubleshooter: + - Start generating client for apiv1. +- profiler: + - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`. +- Various updates to autogenerated clients. + +## v0.60.0 + +### Changes + +- all: + - Refactored examples to reduce module dependencies. + - Update sub-modules to use cloud.google.com/go v0.59.0. +- internal: + - Start generating client for gaming apiv1beta. +- Various updates to autogenerated clients. + +## v0.59.0 + +### Announcements + +goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our +contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept +pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to +point to GitHub. + +### Changes + +- all: + - Remove dependency on honnef.co/go/tools. + - Update our contributing instructions now that we use GitHub for reviews. + - Remove some un-inclusive terminology. +- compute/metadata: + - Pass cancelable context to DNS lookup. +- .github: + - Update templates issue/PR templates. +- internal: + - Bump several clients to GA. + - Fix GoDoc badge source. + - Several automation changes related to the move to GitHub. + - Start generating a client for asset v1p5beta1. +- Various updates to autogenerated clients. + +## v0.58.0 + +### Deprecation notice + +- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking + changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. + +### Changes + +- all: + - The remaining uses of gtransport.Dial have been removed. + - The `genproto` dependency has been updated to a version that makes use of + new `protoreflect` library. For more information on these protobuf changes + please see the following post from the official Go blog: + https://blog.golang.org/protobuf-apiv2. +- internal: + - Started generation of datastore admin v1 client. + - Updated protofuf version used for generation to 3.12.X. + - Update the release levels for several APIs. + - Generate clients with protoc-gen-go@v1.4.1. +- monitoring: + - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation + notice above). +- profiler: + - Fixed flakiness in tests. +- Various updates to autogenerated clients. + +## v0.57.0 + +- all: + - Update module dependency `google.golang.org/api` to `v0.21.0`. +- errorreporting: + - Add exported SetGoogleClientInfo wrappers to manual file. +- expr/v1alpha1: + - Deprecate client. This client will be removed in a future release. +- internal: + - Fix possible data race in TestTracer. + - Pin versions of tools used for generation. + - Correct the release levels for BigQuery APIs. + - Start generation osconfig v1. +- longrunning: + - Add exported SetGoogleClientInfo wrappers to manual file. +- monitoring: + - Stop generation of monitoring/apiv3 because of incoming breaking change. +- trace: + - Add exported SetGoogleClientInfo wrappers to manual file. +- Various updates to autogenerated clients. 
+ +## v0.56.0 + +- secretmanager: + - add IAM helper +- profiler: + - try all us-west1 zones for integration tests +- internal: + - add config to generate webrisk v1 + - add repo and commit to buildcop invocation + - add recaptchaenterprise v1 generation config + - update microgenerator to v0.12.5 + - add datacatalog client + - start generating security center settings v1beta + - start generating osconfig agentendpoint v1 + - setup generation for bigquery/connection/v1beta1 +- all: + - increase continous testing timeout to 45m + - various updates to autogenerated clients. + +## v0.55.0 + +- Various updates to autogenerated clients. + +## v0.54.0 + +- all: + - remove unused golang.org/x/exp from mod file + - update godoc.org links to pkg.go.dev +- compute/metadata: + - use defaultClient when http.Client is nil + - remove subscribeClient +- iam: + - add support for v3 policy and IAM conditions +- Various updates to autogenerated clients. + +## v0.53.0 + +- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers). + - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. +- profiler: remove symbolization (drops support for go1.10) +- Various updates to autogenerated clients. + +## v0.52.0 + +- internal/gapicgen: multiple improvements related to library generation. +- compute/metadata: unset ResponseHeaderTimeout in defaultClient +- docs: fix link to KMS in README.md +- Various updates to autogenerated clients. + +## v0.51.0 + +- secretmanager: + - add IAM helper for generic resource IAM handle +- cloudbuild: + - migrate to microgen in a major version +- Various updates to autogenerated clients. + +## v0.50.0 + +- profiler: + - Support disabling CPU profile collection. + - Log when a profile creation attempt begins. +- compute/metadata: + - Fix panic on malformed URLs. + - InstanceName returns actual instance name. +- Various updates to autogenerated clients. + +## v0.49.0 + +- functions/metadata: + - Handle string resources in JSON unmarshaller. +- Various updates to autogenerated clients. + +## v0.48.0 + +- Various updates to autogenerated clients + +## v0.47.0 + +This release drops support for Go 1.9 and Go 1.10: we continue to officially +support Go 1.11, Go 1.12, and Go 1.13. + +- Various updates to autogenerated clients. +- Add cloudbuild/apiv1 client. + +## v0.46.3 + +This is an empty release that was created solely to aid in storage's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.2 + +This is an empty release that was created solely to aid in spanner's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.1 + +This is an empty release that was created solely to aid in firestore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.0 + +- spanner: + - Retry "Session not found" for read-only transactions. + - Retry aborted PDMLs. +- spanner/spannertest: + - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly. +- storage: + - Add HMACKeyOptions. + - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL, + DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice + StorageClasses but they are still acceptable values. +- trace: + - Remove cloud.google.com/go/trace. 
Package cloud.google.com/go/trace has been + marked OBSOLETE for several years: it is now no longer provided. If you + relied on this package, please vendor it or switch to using + https://cloud.google.com/trace/docs/setup/go (which obsoleted it). + +## v0.45.1 + +This is an empty release that was created solely to aid in pubsub's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.45.0 + +- compute/metadata: + - Add Email method. +- storage: + - Fix duplicated retry logic. + - Add ReaderObjectAttrs.StartOffset. + - Support reading last N bytes of a file when a negative range is given, such + as `obj.NewRangeReader(ctx, -10, -1)`. + - Add HMACKey listing functionality. +- spanner/spannertest: + - Support primary keys with no columns. + - Fix MinInt64 parsing. + - Implement deletion of key ranges. + - Handle reads during a read-write transaction. + - Handle returning DATE values. +- pubsub: + - Fix Ack/Modack request size calculation. +- logging: + - Add auto-detection of monitored resources on GAE Standard. + +## v0.44.3 + +This is an empty release that was created solely to aid in bigtable's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.2 + +This is an empty release that was created solely to aid in bigquery's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.1 + +This is an empty release that was created solely to aid in datastore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.0 + +- datastore: + - Interface elements whose underlying types are supported, are now supported. + - Reduce time to initial retry from 1s to 100ms. +- firestore: + - Add Increment transformation. +- storage: + - Allow emulator with STORAGE_EMULATOR_HOST. + - Add methods for HMAC key management. +- pubsub: + - Add PublishCount and PublishLatency measurements. + - Add DefaultPublishViews and DefaultSubscribeViews for convenience of + importing all views. + - Add add Subscription.PushConfig.AuthenticationMethod. +- spanner: + - Allow emulator usage with SPANNER_EMULATOR_HOST. + - Add cloud.google.com/go/spanner/spannertest, a spanner emulator. + - Add cloud.google.com/go/spanner/spansql which contains types and a parser + for the Cloud Spanner SQL dialect. +- asset: + - Add apiv1p2beta1 client. + +## v0.43.0 + +This is an empty release that was created solely to aid in logging's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.42.0 + +- bigtable: + - Add an admin method to update an instance and clusters. + - Fix bttest regex matching behavior for alternations (things like `|a`). + - Expose BlockAllFilter filter. +- bigquery: + - Add Routines API support. +- storage: + - Add read-only Bucket.LocationType. +- logging: + - Add TraceSampled to Entry. + - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context. +- pubsub: + - Add Cloud Key Management to TopicConfig. + - Change ExpirationPolicy to optional.Duration. +- automl: + - Add apiv1beta1 client. +- iam: + - Fix compilation problem with iam/credentials/apiv1. + +## v0.41.0 + +- bigtable: + - Check results from PredicateFilter in bttest, which fixes certain false matches. 
+- profiler: + - debugLog checks user defined logging options before logging. +- spanner: + - PartitionedUpdates respect query parameters. + - StartInstance allows specifying cloud API access scopes. +- bigquery: + - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics. +- firestore: + - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll. +- replay: + - Change references to IPv4 addresses to localhost, making replay compatible with IPv6. + +## v0.40.0 + +- all: + - Update to protobuf-golang v1.3.1. +- datastore: + - Attempt to decode GAE-encoded keys if initial decoding attempt fails. + - Support integer time conversion. +- pubsub: + - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow, + this value should be adjusted higher. + - Use IPv6 compatible target in testutil. +- bigtable: + - Fix Latin-1 regexp filters in bttest, allowing \C. + - Expose PassAllFilter. +- profiler: + - Add log messages for slow path in start. + - Fix start to allow retry until success. +- firestore: + - Add admin client. +- containeranalysis: + - Add apiv1 client. +- grafeas: + - Add apiv1 client. + +## 0.39.0 + +- bigtable: + - Implement DeleteInstance in bttest. + - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest. +- bigquery: + - Move RequirePartitionFilter outside of TimePartioning. + - Expose models API. +- firestore: + - Allow array values in create and update calls. + - Add CollectionGroup method. +- pubsub: + - Add ExpirationPolicy to Subscription. +- storage: + - Add V4 signing. +- rpcreplay: + - Match streams by first sent request. This further improves rpcreplay's + ability to distinguish streams. +- httpreplay: + - Set up Man-In-The-Middle config only once. This should improve proxy + creation when multiple proxies are used in a single process. + - Remove error on empty Content-Type, allowing requests with no Content-Type + header but a non-empty body. +- all: + - Fix an edge case bug in auto-generated library pagination by properly + propagating pagetoken. + +## 0.38.0 + +This update includes a substantial reduction in our transitive dependency list +by way of updating to opencensus@v0.21.0. + +- spanner: + - Error implements GRPCStatus, allowing status.Convert. +- bigtable: + - Fix a bug in bttest that prevents single column queries returning results + that match other filters. + - Remove verbose retry logging. +- logging: + - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and + rune replace manually. +- recaptchaenterprise: + - Add v1beta1 client. +- phishingprotection: + - Add v1beta1 client. + +## 0.37.4 + +This patch releases re-builds the go.sum. This was not possible in the +previous release. + +- firestore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Add OpenCensus tracing for public methods. + - Marked stable. All future changes come with a backwards compatibility + guarantee. + - Removed firestore/apiv1beta1. All users relying on this low-level library + should migrate to firestore/apiv1. Note that most users should use the + high-level firestore package instead. +- pubsub: + - Allow large messages in synchronous pull case. + - Cap bundler byte limit. This should prevent OOM conditions when there are + a very large number of message publishes occurring. +- storage: + - Add ETag to BucketAttrs and ObjectAttrs. +- datastore: + - Removed some non-sensical OpenCensus traces. 
+- webrisk: + - Add v1 client. +- asset: + - Add v1 client. +- cloudtasks: + - Add v2 client. + +## 0.37.3 + +This patch release removes github.com/golang/lint from the transitive +dependency list, resolving `go get -u` problems. + +Note: this release intentionally has a broken go.sum. Please use v0.37.4. + +## 0.37.2 + +This patch release is mostly intended to bring in v0.3.0 of +google.golang.org/api, which fixes a GCF deployment issue. + +Note: we had to-date accidentally marked Redis as stable. In this release, we've +fixed it by downgrading its documentation to alpha, as it is in other languages +and docs. + +- all: + - Document context in generated libraries. + +## 0.37.1 + +Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which +introduces a new oauth2 url. + +## 0.37.0 + +- spanner: + - Add BatchDML method. + - Reduced initial time between retries. +- bigquery: + - Produce better error messages for InferSchema. + - Add logical type control for avro loads. + - Add support for the GEOGRAPHY type. +- datastore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Allow flatten tag on struct pointers. + - Fixed a bug that caused queries to panic with invalid queries. Instead they + will now return an error. +- profiler: + - Add ability to override GCE zone and instance. +- pubsub: + - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more + consistently retry specific error codes based on whether they're idempotent + or non-idempotent. +- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing + the Content-Length header to be dropped. +- iot: + - Add new apiv1 client. +- securitycenter: + - Add new apiv1 client. +- cloudscheduler: + - Add new apiv1 client. + +## 0.36.0 + +- spanner: + - Reduce minimum retry backoff from 1s to 100ms. This makes time between + retries much faster and should improve latency. +- storage: + - Add support for Bucket Policy Only. +- kms: + - Add ResourceIAM helper method. + - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM. +- firestore: + - Switch from v1beta1 API to v1 API. + - Allow emulator with FIRESTORE_EMULATOR_HOST. +- bigquery: + - Add NumLongTermBytes to Table. + - Add TotalBytesProcessedAccuracy to QueryStatistics. +- irm: + - Add new v1alpha2 client. +- talent: + - Add new v4beta1 client. +- rpcreplay: + - Fix connection to work with grpc >= 1.17. + - It is now required for an actual gRPC server to be running for Dial to + succeed. + +## 0.35.1 + +- spanner: + - Adds OpenCensus views back to public API. + +## v0.35.0 + +- all: + - Add go.mod and go.sum. + - Switch usage of gax-go to gax-go/v2. +- bigquery: + - Fix bug where time partitioning could not be removed from a table. + - Fix panic that occurred with empty query parameters. +- bttest: + - Fix bug where deleted rows were returned by ReadRows. +- bigtable/emulator: + - Configure max message size to 256 MiB. +- firestore: + - Allow non-transactional queries in transactions. + - Allow StartAt/EndBefore on direct children at any depth. + - QuerySnapshotIterator.Stop may be called in an error state. + - Fix bug the prevented reset of transaction write state in between retries. +- functions/metadata: + - Make Metadata.Resource a pointer. +- logging: + - Make SpanID available in logging.Entry. +- metadata: + - Wrap !200 error code in a typed err. +- profiler: + - Add function to check if function name is within a particular file in the + profile. + - Set parent field in create profile request. 
+ - Return kubernetes client to start cluster, so client can be used to poll + cluster. + - Add function for checking if filename is in profile. +- pubsub: + - Fix bug where messages expired without an initial modack in + synchronous=true mode. + - Receive does not retry ResourceExhausted errors. +- spanner: + - client.Close now cancels existing requests and should be much faster for + large amounts of sessions. + - Correctly allow MinOpened sessions to be spun up. + +## v0.34.0 + +- functions/metadata: + - Switch to using JSON in context. + - Make Resource a value. +- vision: Fix ProductSearch return type. +- datastore: Add an example for how to handle MultiError. + +## v0.33.1 + +- compute: Removes an erroneously added go.mod. +- logging: Populate source location in fromLogEntry. + +## v0.33.0 + +- bttest: + - Add support for apply_label_transformer. +- expr: + - Add expr library. +- firestore: + - Support retrieval of missing documents. +- kms: + - Add IAM methods. +- pubsub: + - Clarify extension documentation. +- scheduler: + - Add v1beta1 client. +- vision: + - Add product search helper. + - Add new product search client. + +## v0.32.0 + +Note: This release is the last to support Go 1.6 and 1.8. + +- bigquery: + - Add support for removing an expiration. + - Ignore NeverExpire in Table.Create. + - Validate table expiration time. +- cbt: + - Add note about not supporting arbitrary bytes. +- datastore: + - Align key checks. +- firestore: + - Return an error when using Start/End without providing values. +- pubsub: + - Add pstest Close method. + - Clarify MaxExtension documentation. +- securitycenter: + - Add v1beta1 client. +- spanner: + - Allow nil in mutations. + - Improve doc of SessionPoolConfig.MaxOpened. + - Increase session deletion timeout from 5s to 15s. + +## v0.31.0 + +- bigtable: + - Group mutations across multiple requests. +- bigquery: + - Link to bigquery troubleshooting errors page in bigquery.Error comment. +- cbt: + - Fix go generate command. + - Document usage of both maxage + maxversions. +- datastore: + - Passing nil keys results in ErrInvalidKey. +- firestore: + - Clarify what Document.DataTo does with untouched struct fields. +- profile: + - Validate service name in agent. +- pubsub: + - Fix deadlock with pstest and ctx.Cancel. + - Fix a possible deadlock in pstest. +- trace: + - Update doc URL with new fragment. + +Special thanks to @fastest963 for going above and beyond helping us to debug +hard-to-reproduce Pub/Sub issues. + +## v0.30.0 + +- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. +- bigtable: bttest supports row sample filter. +- functions: metadata package added for accessing Cloud Functions resource metadata. + +## v0.29.0 + +- bigtable: + - Add retry to all idempotent RPCs. + - cbt supports complex GC policies. + - Emulator supports arbitrary bytes in regex filters. +- firestore: Add ArrayUnion and ArrayRemove. +- logging: Add the ContextFunc option to supply the context used for + asynchronous RPCs. +- profiler: Ignore NotDefinedError when fetching the instance name +- pubsub: + - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. + - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. + - Fix deadlock. + - Restore Ack/Nack/Modacks metrics. + - Improve context handling in iterator. + - Implement synchronous mode for Receive. + - pstest: add Pull. +- spanner: Add a metric for the number of sessions currently opened. 
+- storage: + - Canceling the context releases all resources. + - Add additional RetentionPolicy attributes. +- vision/apiv1: Add LocalizeObjects method. + +## v0.28.0 + +- bigtable: + - Emulator returns Unimplemented for snapshot RPCs. +- bigquery: + - Support zero-length repeated, nested fields. +- cloud assets: + - Add v1beta client. +- datastore: + - Don't nil out transaction ID on retry. +- firestore: + - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next + returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator + (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots. + - Add array-contains operator. +- IAM: + - Add iam/credentials/apiv1 client. +- pubsub: + - Canceling the context passed to Subscription.Receive causes Receive to return when + processing finishes on all messages currently in progress, even if new messages are arriving. +- redis: + - Add redis/apiv1 client. +- storage: + - Add Reader.Attrs. + - Deprecate several Reader getter methods: please use Reader.Attrs for these instead. + - Add ObjectHandle.Bucket and ObjectHandle.Object methods. + +## v0.27.0 + +- bigquery: + - Allow modification of encryption configuration and partitioning options to a table via the Update call. + - Add a SchemaFromJSON function that converts a JSON table schema. +- bigtable: + - Restore cbt count functionality. +- containeranalysis: + - Add v1beta client. +- spanner: + - Fix a case where an iterator might not be closed correctly. +- storage: + - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount. + - Add a method to Reader that returns the parsed value of the Last-Modified header. + +## v0.26.0 + +- bigquery: + - Support filtering listed jobs by min/max creation time. + - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering). + - Include job creator email in Job struct. +- bigtable: + - Add `RowSampleFilter`. + - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters + must match the entire target string to succeed. Previously, the emulator was + succeeding on partial matches. + NOTE: As of this release, this change only affects the emulator when run + from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched + from `gcloud` will be updated in a subsequent `gcloud` release. +- dataproc: Add apiv1beta2 client. +- datastore: Save non-nil pointer fields on omitempty. +- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header. +- logging/logadmin: Support writer_identity and include_children. +- pubsub: + - Support labels on topics and subscriptions. + - Support message storage policy for topics. + - Use the distribution of ack times to determine when to extend ack deadlines. + The only user-visible effect of this change should be that programs that + call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub + Subscriber`. +- storage: + - Support predefined ACLs. + - Support additional ACL fields other than Entity and Role. + - Support bucket websites. + - Support bucket logging. + + +## v0.25.0 + +- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md) +- bigtable: + - cbt: Support a GC policy of "never". +- errorreporting: + - Support User. + - Close now calls Flush. + - Use OnError (previously ignored). 
+ - Pass through the RPC error as-is to OnError. +- httpreplay: A tool for recording and replaying HTTP requests + (for the bigquery and storage clients in this repo). +- kms: v1 client added +- logging: add SourceLocation to Entry. +- storage: improve CRC checking on read. + +## v0.24.0 + +- bigquery: Support for the NUMERIC type. +- bigtable: + - cbt: Optionally specify columns for read/lookup + - Support instance-level administration. +- oslogin: New client for the OS Login API. +- pubsub: + - The package is now stable. There will be no further breaking changes. + - Internal changes to improve Subscription.Receive behavior. +- storage: Support updating bucket lifecycle config. +- spanner: Support struct-typed parameter bindings. +- texttospeech: New client for the Text-to-Speech API. + +## v0.23.0 + +- bigquery: Add DDL stats to query statistics. +- bigtable: + - cbt: Add cells-per-column limit for row lookup. + - cbt: Make it possible to combine read filters. +- dlp: v2beta2 client removed. Use the v2 client instead. +- firestore, spanner: Fix compilation errors due to protobuf changes. + +## v0.22.0 + +- bigtable: + - cbt: Support cells per column limit for row read. + - bttest: Correctly handle empty RowSet. + - Fix ReadModifyWrite operation in emulator. + - Fix API path in GetCluster. + +- bigquery: + - BEHAVIOR CHANGE: Retry on 503 status code. + - Add dataset.DeleteWithContents. + - Add SchemaUpdateOptions for query jobs. + - Add Timeline to QueryStatistics. + - Add more stats to ExplainQueryStage. + - Support Parquet data format. + +- datastore: + - Support omitempty for times. + +- dlp: + - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, + which is now out of beta. + - Add v2 client. + +- firestore: + - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. + +- iam: + - Support JWT signing via SignJwt callopt. + +- profiler: + - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. + - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. + - Avoid returning empty serial port output. + +- pubsub: + - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. + - BEHAVIOR CHANGE: Don't backoff on EOF. + - pstest: Support Acknowledge and ModifyAckDeadline RPCs. + +- redis: + - Add v1 beta Redis client. + +- spanner: + - Support SessionLabels. + +- speech: + - Add api v1 beta1 client. + +- storage: + - BEHAVIOR CHANGE: Retry reads when retryable error occurs. + - Fix delete of object in requester-pays bucket. + - Support KMS integration. + +## v0.21.0 + +- bigquery: + - Add OpenCensus tracing. + +- firestore: + - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot + whose Exists method returns false. DocumentRef.Get and Transaction.Get + return the non-nil DocumentSnapshot in addition to a NotFound error. + **DocumentRef.GetAll and Transaction.GetAll return a non-nil + DocumentSnapshot instead of nil.** + - Add DocumentIterator.Stop. **Call Stop whenever you are done with a + DocumentIterator.** + - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime + notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. + - Canceling an RPC now always returns a grpc.Status with codes.Canceled. + +- spanner: + - Add `CommitTimestamp`, which supports inserting the commit timestamp of a + transaction into a column. + +## v0.20.0 + +- bigquery: Support SchemaUpdateOptions for load jobs. + +- bigtable: + - Add SampleRowKeys. 
+ - cbt: Support union, intersection GCPolicy. + - Retry admin RPCS. + - Add trace spans to retries. + +- datastore: Add OpenCensus tracing. + +- firestore: + - Fix queries involving Null and NaN. + - Allow Timestamp protobuffers for time values. + +- logging: Add a WriteTimeout option. + +- spanner: Support Batch API. + +- storage: Add OpenCensus tracing. + +## v0.19.0 + +- bigquery: + - Support customer-managed encryption keys. + +- bigtable: + - Improved emulator support. + - Support GetCluster. + +- datastore: + - Add general mutations. + - Support pointer struct fields. + - Support transaction options. + +- firestore: + - Add Transaction.GetAll. + - Support document cursors. + +- logging: + - Support concurrent RPCs to the service. + - Support per-entry resources. + +- profiler: + - Add config options to disable heap and thread profiling. + - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. + +- pubsub: + - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the + callback returns). + - Add SubscriptionInProject. + - Add OpenCensus instrumentation for streaming pull. + +- storage: + - Support CORS. + +## v0.18.0 + +- bigquery: + - Marked stable. + - Schema inference of nullable fields supported. + - Added TimePartitioning to QueryConfig. + +- firestore: Data provided to DocumentRef.Set with a Merge option can contain + Delete sentinels. + +- logging: Clients can accept parent resources other than projects. + +- pubsub: + - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome. + - Support updating more subscription metadata: AckDeadline, + RetainAckedMessages and RetentionDuration. + +- oslogin/apiv1beta: New client for the Cloud OS Login API. + +- rpcreplay: A package for recording and replaying gRPC traffic. + +- spanner: + - Add a ReadWithOptions that supports a row limit, as well as an index. + - Support query plan and execution statistics. + - Added [OpenCensus](http://opencensus.io) support. + +- storage: Clarify checksum validation for gzipped files (it is not validated + when the file is served uncompressed). + + +## v0.17.0 + +- firestore BREAKING CHANGES: + - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. + Change + `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` + to + `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` + + Change + `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` + to + `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` + - Rename MergePaths to Merge; require args to be FieldPaths + - A value stored as an integer can be read into a floating-point field, and vice versa. +- bigtable/cmd/cbt: + - Support deleting a column. + - Add regex option for row read. +- spanner: Mark stable. +- storage: + - Add Reader.ContentEncoding method. + - Fix handling of SignedURL headers. +- bigquery: + - If Uploader.Put is called with no rows, it returns nil without making a + call. + - Schema inference supports the "nullable" option in struct tags for + non-required fields. + - TimePartitioning supports "Field". + + +## v0.16.0 + +- Other bigquery changes: + - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). + - UseStandardSQL is deprecated; set UseLegacySQL to true if you need + Legacy SQL. + - Uploader.Put will generate a random insert ID if you do not provide one. + - Support time partitioning for load jobs. + - Support dry-run queries. + - A `Job` remembers its last retrieved status. 
+ - Support retrieving job configuration. + - Support labels for jobs and tables. + - Support dataset access lists. + - Improve support for external data sources, including data from Bigtable and + Google Sheets, and tables with external data. + - Support updating a table's view configuration. + - Fix uploading civil times with nanoseconds. + +- storage: + - Support PubSub notifications. + - Support Requester Pays buckets. + +- profiler: Support goroutine and mutex profile types. + +## v0.15.0 + +- firestore: beta release. See the + [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). + +- errorreporting: The existing package has been redesigned. + +- errors: This package has been removed. Use errorreporting. + + +## v0.14.0 + +- bigquery BREAKING CHANGES: + - Standard SQL is the default for queries and views. + - `Table.Create` takes `TableMetadata` as a second argument, instead of + options. + - `Dataset.Create` takes `DatasetMetadata` as a second argument. + - `DatasetMetadata` field `ID` renamed to `FullID` + - `TableMetadata` field `ID` renamed to `FullID` + +- Other bigquery changes: + - The client will append a random suffix to a provided job ID if you set + `AddJobIDSuffix` to true in a job config. + - Listing jobs is supported. + - Better retry logic. + +- vision, language, speech: clients are now stable + +- monitoring: client is now beta + +- profiler: + - Rename InstanceName to Instance, ZoneName to Zone + - Auto-detect service name and version on AppEngine. + +## v0.13.0 + +- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these + options to continue using Legacy SQL after the client switches its default + to Standard SQL. + +- bigquery: Support for updating dataset labels. + +- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other + than the client's. DatasetsInProject is no longer needed and is deprecated. + +- bigtable: Fail ListInstances when any zones fail. + +- spanner: support decoding of slices of basic types (e.g. []string, []int64, + etc.) + +- logging/logadmin: UpdateSink no longer creates a sink if it is missing + (actually a change to the underlying service, not the client) + +- profiler: Service and ServiceVersion replace Target in Config. + +## v0.12.0 + +- pubsub: Subscription.Receive now uses streaming pull. + +- pubsub: add Client.TopicInProject to access topics in a different project + than the client. + +- errors: renamed errorreporting. The errors package will be removed shortly. + +- datastore: improved retry behavior. + +- bigquery: support updates to dataset metadata, with etags. + +- bigquery: add etag support to Table.Update (BREAKING: etag argument added). + +- bigquery: generate all job IDs on the client. + +- storage: support bucket lifecycle configurations. + + +## v0.11.0 + +- Clients for spanner, pubsub and video are now in beta. + +- New client for DLP. + +- spanner: performance and testing improvements. + +- storage: requester-pays buckets are supported. + +- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. + +- pubsub: bug fixes and other minor improvements + +## v0.10.0 + +- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. + +- pubsub: Subscription.Receive now runs concurrently for higher throughput. + +- vision: cloud.google.com/go/vision is deprecated. Use +cloud.google.com/go/vision/apiv1 instead. + +- translation: now stable. + +- trace: several changes to the surface. See the link below. 
+ +### Code changes required from v0.9.0 + +- pubsub: Replace + + ``` + sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) + ``` + + with + + ``` + sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ + PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, + }) + ``` + +- trace: traceGRPCServerInterceptor will be provided from *trace.Client. +Given an initialized `*trace.Client` named `tc`, instead of + + ``` + s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) + ``` + + write + + ``` + s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) + ``` + +- trace trace.GRPCClientInterceptor will also provided from *trace.Client. +Instead of + + ``` + conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) + ``` + + write + + ``` + conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + ``` + +- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC +interceptor as a dial option as shown below when initializing Cloud package +clients: + + ``` + c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) + if err != nil { + ... + } + ``` + + +## v0.9.0 + +- Breaking changes to some autogenerated clients. +- rpcreplay package added. + +## v0.8.0 + +- profiler package added. +- storage: + - Retry Objects.Insert call. + - Add ProgressFunc to WRiter. +- pubsub: breaking changes: + - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). + - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). + - Message.Done replaced with Message.Ack and Message.Nack. + +## v0.7.0 + +- Release of a client library for Spanner. See +the +[blog +post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). +Note that although the Spanner service is beta, the Go client library is alpha. + +## v0.6.0 + +- Beta release of BigQuery, DataStore, Logging and Storage. See the +[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). + +- bigquery: + - struct support. Read a row directly into a struct with +`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. +You can also use field tags. See the [package documentation][cloud-bigquery-ref] +for details. + + - The `ValueList` type was removed. It is no longer necessary. Instead of + ```go + var v ValueList + ... it.Next(&v) .. + ``` + use + + ```go + var v []Value + ... it.Next(&v) ... + ``` + + - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or + `ValueList` would append to the slice. Now each call resets the size to zero first. + + - Schema inference will infer the SQL type BYTES for a struct field of + type []byte. Previously it inferred STRING. + + - The types `uint`, `uint64` and `uintptr` are no longer supported in schema + inference. BigQuery's integer type is INT64, and those types may hold values + that are not correctly represented in a 64-bit signed integer. + +## v0.5.0 + +- bigquery: + - The SQL types DATE, TIME and DATETIME are now supported. They correspond to + the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` + package. 
+ - Support for query parameters. + - Support deleting a dataset. + - Values from INTEGER columns will now be returned as int64, not int. This + will avoid errors arising from large values on 32-bit systems. +- datastore: + - Nested Go structs encoded as Entity values, instead of a +flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg. + ```go + type State struct { + Cities []struct{ + Populations []int + } + } + ``` + See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + - Contexts no longer hold namespaces; instead you must set a key's namespace + explicitly. Also, key functions have been changed and renamed. + - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + - All the fields of Key are exported. That means you can construct any Key with a struct literal: + ```go + k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} + ``` + - As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. + - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace + ```go + NewIncompleteKey(ctx, kind, parent) + ``` + with + ```go + IncompleteKey(kind, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace + ```go + NewKey(ctx, kind, name, 0, parent) + NewKey(ctx, kind, "", id, parent) + ``` + with + ```go + NameKey(kind, name, parent) + IDKey(kind, id, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. + - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. + - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for +more details. + +## v0.4.0 + +- bigquery: + -`NewGCSReference` is now a function, not a method on `Client`. + - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling + loading data into a table from a file or any `io.Reader`. + * Client.Table and Client.OpenTable have been removed. + Replace + ```go + client.OpenTable("project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table") + ``` + + * Client.CreateTable has been removed. + Replace + ```go + client.CreateTable(ctx, "project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table").Create(ctx) + ``` + + * Dataset.ListTables have been replaced with Dataset.Tables. + Replace + ```go + tables, err := ds.ListTables(ctx) + ``` + with + ```go + it := ds.Tables(ctx) + for { + table, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use table. + } + ``` + + * Client.Read has been replaced with Job.Read, Table.Read and Query.Read. + Replace + ```go + it, err := client.Read(ctx, job) + ``` + with + ```go + it, err := job.Read(ctx) + ``` + and similarly for reading from tables or queries. + + * The iterator returned from the Read methods is now named RowIterator. Its + behavior is closer to the other iterators in these libraries. 
It no longer + supports the Schema method; see the next item. + Replace + ```go + for it.Next(ctx) { + var vals ValueList + if err := it.Get(&vals); err != nil { + // TODO: Handle error. + } + // TODO: use vals. + } + if err := it.Err(); err != nil { + // TODO: Handle error. + } + ``` + with + ``` + for { + var vals ValueList + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use vals. + } + ``` + Instead of the `RecordsPerRequest(n)` option, write + ```go + it.PageInfo().MaxSize = n + ``` + Instead of the `StartIndex(i)` option, write + ```go + it.StartIndex = i + ``` + + * ValueLoader.Load now takes a Schema in addition to a slice of Values. + Replace + ```go + func (vl *myValueLoader) Load(v []bigquery.Value) + ``` + with + ```go + func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) + ``` + + + * Table.Patch is replace by Table.Update. + Replace + ```go + p := table.Patch() + p.Description("new description") + metadata, err := p.Apply(ctx) + ``` + with + ```go + metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ + Description: "new description", + }) + ``` + + * Client.Copy is replaced by separate methods for each of its four functions. + All options have been replaced by struct fields. + + * To load data from Google Cloud Storage into a table, use Table.LoaderFrom. + + Replace + ```go + client.Copy(ctx, table, gcsRef) + ``` + with + ```go + table.LoaderFrom(gcsRef).Run(ctx) + ``` + Instead of passing options to Copy, set fields on the Loader: + ```go + loader := table.LoaderFrom(gcsRef) + loader.WriteDisposition = bigquery.WriteTruncate + ``` + + * To extract data from a table into Google Cloud Storage, use + Table.ExtractorTo. Set fields on the returned Extractor instead of + passing options. + + Replace + ```go + client.Copy(ctx, gcsRef, table) + ``` + with + ```go + table.ExtractorTo(gcsRef).Run(ctx) + ``` + + * To copy data into a table from one or more other tables, use + Table.CopierFrom. Set fields on the returned Copier instead of passing options. + + Replace + ```go + client.Copy(ctx, dstTable, srcTable) + ``` + with + ```go + dst.Table.CopierFrom(srcTable).Run(ctx) + ``` + + * To start a query job, create a Query and call its Run method. Set fields + on the query instead of passing options. + + Replace + ```go + client.Copy(ctx, table, query) + ``` + with + ```go + query.Run(ctx) + ``` + + * Table.NewUploader has been renamed to Table.Uploader. Instead of options, + configure an Uploader by setting its fields. + Replace + ```go + u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) + ``` + with + ```go + u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) + u.IgnoreUnknownValues = true + ``` + +- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package +`google.golang.org/api/iterator`. + +## v0.3.0 + +- storage: + * AdminClient replaced by methods on Client. + Replace + ```go + adminClient.CreateBucket(ctx, bucketName, attrs) + ``` + with + ```go + client.Bucket(bucketName).Create(ctx, projectID, attrs) + ``` + + * BucketHandle.List replaced by BucketHandle.Objects. + Replace + ```go + for query != nil { + objs, err := bucket.List(d.ctx, query) + if err != nil { ... } + query = objs.Next + for _, obj := range objs.Results { + fmt.Println(obj) + } + } + ``` + with + ```go + iter := bucket.Objects(d.ctx, query) + for { + obj, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { ... 
} + fmt.Println(obj) + } + ``` + (The `iterator` package is at `google.golang.org/api/iterator`.) + + Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`. + + Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`. + + + * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom. + Replace + ```go + attrs, err := src.CopyTo(ctx, dst, nil) + ``` + with + ```go + attrs, err := dst.CopierFrom(src).Run(ctx) + ``` + + Replace + ```go + attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"}) + ``` + with + ```go + c := dst.CopierFrom(src) + c.ContextType = "text/html" + attrs, err := c.Run(ctx) + ``` + + * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom. + Replace + ```go + attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil) + ``` + with + ```go + attrs, err := dst.ComposerFrom(src1, src2).Run(ctx) + ``` + + * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate. + Replace + ```go + attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"}) + ``` + with + ```go + attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"}) + ``` + + * ObjectHandle.WithConditions replaced by ObjectHandle.If. + Replace + ```go + obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen)) + ``` + with + ```go + obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen}) + ``` + + Replace + ```go + obj.WithConditions(storage.IfGenerationMatch(0)) + ``` + with + ```go + obj.If(storage.Conditions{DoesNotExist: true}) + ``` + + * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`). + +- Package preview/logging deleted. Use logging instead. + +## v0.2.0 + +- Logging client replaced with preview version (see below). + +- New clients for some of Google's Machine Learning APIs: Vision, Speech, and +Natural Language. + +- Preview version of a new [Stackdriver Logging][cloud-logging] client in +[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). +This client uses gRPC as its transport layer, and supports log reading, sinks +and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md similarity index 53% rename from vendor/google.golang.org/appengine/CONTRIBUTING.md rename to vendor/cloud.google.com/go/CODE_OF_CONDUCT.md index ffc298520..8fd1bc9c2 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md @@ -1,48 +1,4 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. Get the package: - - `go get -d google.golang.org/appengine` -1. Change into the checked out source: - - `cd $GOPATH/src/google.golang.org/appengine` -1. Fork the repo. -1. Set your fork as a remote: - - `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` -1. Make changes, commit to your fork. -1. Send a pull request with your changes. - The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. - -# Testing - -## Running system tests - -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. 
- -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. - -Run tests with `goapp test`: - -``` -goapp test -v google.golang.org/appengine/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct +# Contributor Code of Conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, @@ -86,5 +42,3 @@ or contacting one or more of the project maintainers. This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 000000000..36d1b275e --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,364 @@ +# Contributing + +1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). + The issue will be used to discuss the bug or feature and should be created + before sending a PR. + +1. [Install Go](https://golang.org/dl/). + 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) + is in your `PATH`. + 1. Check it's working by running `go version`. + * If it doesn't work, check the install location, usually + `/usr/local/go`, is on your `PATH`. + +1. Sign one of the +[contributor license agreements](#contributor-license-agreements) below. + +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` + +1. Change into the checked out source: + `cd google-cloud-go` + +1. Fork the repo. + +1. Set your fork as a remote: + `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git` + +1. Make changes, commit to your fork. + + Commit messages should follow the + [Conventional Commits Style](https://www.conventionalcommits.org). The scope + portion should always be filled with the name of the package affected by the + changes being made. For example: + ``` + feat(functions): add gophers codelab + ``` + +1. Send a pull request with your changes. + + To minimize friction, consider setting `Allow edits from maintainers` on the + PR, which will enable project committers and automation to update your PR. + +1. A maintainer will review the pull request and make comments. + + Prefer adding additional commits over amending and force-pushing since it can + be difficult to follow code reviews when the commit history changes. + + Commits will be squashed when they're merged. + +## Policy on new dependencies + +While the Go ecosystem is rich with useful modules, in this project we try to +minimize the number of direct dependencies we have on modules that are not +Google-owned. 
+ +Adding new third party dependencies can have the following effects: +* broadens the vulnerability surface +* increases so called "vanity" import routing infrastructure failure points +* increases complexity of our own [`third_party`][] imports + +So if you are contributing, please either contribute the full implementation +directly, or find a Google-owned project that provides the functionality. Of +course, there may be exceptions to this rule, but those should be well defined +and agreed upon by the maintainers ahead of time. + +## Testing + +We test code against two versions of Go, the minimum and maximum versions +supported by our clients. To see which versions these are checkout our +[README](README.md#supported-versions). + +### Integration Tests + +In addition to the unit tests, you may run the integration test suite. These +directions describe setting up your environment to run integration tests for +_all_ packages: note that many of these instructions may be redundant if you +intend only to run integration tests on a single package. + +#### GCP Setup + +To run the integrations tests, creation and configuration of three projects in +the Google Developers Console is required: one specifically for Firestore +integration tests, one specifically for Bigtable integration tests, and another +for all other integration tests. We'll refer to these projects as +"Firestore project", "Bigtable project" and "general project". + +Note: You can skip setting up Bigtable project if you do not plan working on or running a few Bigtable +tests that require a secondary project + +After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) +for each project. Ensure the project-level **Owner** +[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to +each service account. During the creation of the service account, you should +download the JSON credential file for use later. + +Next, ensure the following APIs are enabled in the general project: + +- BigQuery API +- BigQuery Data Transfer API +- Cloud Dataproc API +- Cloud Dataproc Control API Private +- Cloud Datastore API +- Cloud Firestore API +- Cloud Key Management Service (KMS) API +- Cloud Natural Language API +- Cloud OS Login API +- Cloud Pub/Sub API +- Cloud Resource Manager API +- Cloud Spanner API +- Cloud Speech API +- Cloud Translation API +- Cloud Video Intelligence API +- Cloud Vision API +- Compute Engine API +- Compute Engine Instance Group Manager API +- Container Registry API +- Firebase Rules API +- Google Cloud APIs +- Google Cloud Deployment Manager V2 API +- Google Cloud SQL +- Google Cloud Storage +- Google Cloud Storage JSON API +- Google Compute Engine Instance Group Updater API +- Google Compute Engine Instance Groups API +- Kubernetes Engine API +- Cloud Error Reporting API +- Pub/Sub Lite API + +Next, create a Datastore database in the general project, and a Firestore +database in the Firestore project. + +Finally, in the general project, create an API key for the translate API: + +- Go to GCP Developer Console. +- Navigate to APIs & Services > Credentials. +- Click Create Credentials > API Key. +- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below. + +#### Local Setup + +Once the three projects are created and configured, set the following environment +variables: + +- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g. +bamboo-shift-455) for the general project. 
+- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general +project's service account. +- `GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES`: Comma separated list of developer's Datastore databases. If not provided, default database i.e. empty string is used. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID +(e.g. doorway-cliff-677) for the Firestore project. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES` : Comma separated list of developer's Firestore databases. If not provided, default database is used. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the +Firestore project's service account. +- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above. +- `GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID`: Developers Console project's ID (e.g. doorway-cliff-677) for Bigtable optional secondary project. This can be same as Firestore project or any project other than the general project. +- `GCLOUD_TESTS_BIGTABLE_CLUSTER`: Cluster ID of Bigtable cluster in general project +- `GCLOUD_TESTS_BIGTABLE_PRI_PROJ_SEC_CLUSTER`: Optional. Cluster ID of Bigtable secondary cluster in general project + +As part of the setup that follows, the following variables will be configured: + +- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests, +in the form +"projects/P/locations/L/keyRings/R". The creation of this is described below. +- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests, +in the form +"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region. +- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone. + +Install the [gcloud command-line tool][gcloudcli] to your machine and use it to +create some resources used in integration tests. + +From the project's root directory: + +``` sh +# Sets the default project in your env. +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticates the gcloud tool with your account. +$ gcloud auth login + +# Create the indexes for all the databases you want to use in the datastore integration tests. +# Use empty string as databaseID or skip database flag for default database. +$ gcloud alpha datastore indexes create --database=your-databaseID-1 --project=$GCLOUD_TESTS_GOLANG_PROJECT_ID testdata/index.yaml + +# Creates a Google Cloud storage bucket with the same name as your test project, +# and with the Cloud Logging service account as owner, for the sink +# integration tests in logging. +$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Creates a PubSub topic for integration tests of storage notifications. +$ gcloud beta pubsub topics create go-storage-notification-test +# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user +# "service-@gs-project-accounts.iam.gserviceaccount.com" +# as a publisher to that topic. + +# Creates a Spanner instance for the spanner integration tests. +$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to +# delete the instance after testing with 'gcloud beta spanner instances delete'. 
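+# For example, once testing is finished, the instance created above can be
+# removed (left commented out here so it is not run as part of setup):
+#   gcloud beta spanner instances delete go-integration-test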
+ +$ export MY_KEYRING=some-keyring-name +$ export MY_LOCATION=global +$ export MY_SINGLE_LOCATION=us-central1 +# Creates a KMS keyring, in the same location as the default location for your +# project's buckets. +$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION +# Creates two keys in the keyring, named key1 and key2. +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. +$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING +# Authorizes Google Cloud Storage to encrypt and decrypt using key1. +$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 + +# Create KMS Key in one region for Bigtable +$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable. +$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING +# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud: +$ gcloud beta services identity create \ + --service=bigtableadmin.googleapis.com \ + --project $GCLOUD_TESTS_GOLANG_PROJECT_ID +# Note the service agent email for the agent created. +$ export SERVICE_AGENT_EMAIL= + +# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1 +$ gcloud kms keys add-iam-policy-binding key1 \ + --keyring $MY_KEYRING \ + --location $MY_SINGLE_LOCATION \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter \ + --member "serviceAccount:$SERVICE_AGENT_EMAIL" \ + --project $GCLOUD_TESTS_GOLANG_PROJECT_ID +``` + +It may be useful to add exports to your shell initialization for future use. +For instance, in `.zshrc`: + +```sh +#### START GO SDK Test Variables +# Developers Console project's ID (e.g. bamboo-shift-455) for the general project. +export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project + +# Developers Console project's ID (e.g. bamboo-shift-455) for the Bigtable project. +export GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID=your-bigtable-optional-secondary-project + +# The path to the JSON key file of the general project's service account. +export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json + +# Comma separated list of developer's Datastore databases. If not provided, +# default database i.e. empty string is used. +export GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES=your-database-1,your-database-2 + +# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. +export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project + +# Comma separated list of developer's Firestore databases. If not provided, default database is used. +export GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES=your-database-1,your-database-2 + +# The path to the JSON key file of the Firestore project's service account. +export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json + +# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R". +# The creation of this is described below. 
+export MY_KEYRING=my-golang-sdk-test +export MY_LOCATION=global +export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING + +# API key for using the Translate API. +export GCLOUD_TESTS_API_KEY=abcdefghijk123456789 + +# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones) +export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region +#### END GO SDK Test Variables +``` + +#### Running + +Once you've done the necessary setup, you can run the integration tests by +running: + +``` sh +$ go test -v ./... +``` + +Note that the above command will not run the tests in other modules. To run +tests on other modules, first navigate to the appropriate +subdirectory. For instance, to run only the tests for datastore: +``` sh +$ cd datastore +$ go test -v ./... +``` + +#### Replay + +Some packages can record the RPCs during integration tests to a file for +subsequent replay. To record, pass the `-record` flag to `go test`. The +recording will be saved to the _package_`.replay` file. To replay integration +tests from a saved recording, the replay file must be present, the `-short` +flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` +environment variable must have a non-empty value. + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your +work**, then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
+ +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0, +available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate +[`third_party`]: https://opensource.google/documentation/reference/thirdparty diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 000000000..995149790 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,86 @@ +# Google Cloud Client Libraries for Go + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go) + +Go packages for [Google Cloud Platform](https://cloud.google.com) services. + +``` go +import "cloud.google.com/go" +``` + +To install the packages on your system, *do not clone the repo*. Instead: + +1. Change to your project directory: `cd /my/cloud/project` +1. Get the package you want to use. Some products have their own module, so it's + best to `go get` the package(s) you want to use: + +```bash +go get cloud.google.com/go/firestore # Replace with the package you want to use. +``` + +**NOTE:** Some of these packages are under development, and may occasionally +make backwards-incompatible changes. + +## Supported APIs + +For an updated list of all of our released APIs please see our +[reference docs](https://cloud.google.com/go/docs/reference). + +## [Go Versions Supported](#supported-versions) + +Our libraries are compatible with at least the three most recent, major Go +releases. They are currently compatible with: + +- Go 1.22 +- Go 1.21 +- Go 1.20 + +## Authorization + +By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) +to the `NewClient` function of the desired package. For example: + +```go +client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: + +```go +tokenSource := ... +client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +``` + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. 
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. + +## Links + +- [Go on Google Cloud](https://cloud.google.com/go/home) +- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started) +- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart) +- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go) +- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go) diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md new file mode 100644 index 000000000..6d0fcf4f9 --- /dev/null +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -0,0 +1,141 @@ +# Releasing + +## Determine which module to release + +The Go client libraries have several modules. Each module does not strictly +correspond to a single library - they correspond to trees of directories. If a +file needs to be released, you must release the closest ancestor module. + +To see all modules: + +```bash +$ cat `find . -name go.mod` | grep module +module cloud.google.com/go/pubsub +module cloud.google.com/go/spanner +module cloud.google.com/go +module cloud.google.com/go/bigtable +module cloud.google.com/go/bigquery +module cloud.google.com/go/storage +module cloud.google.com/go/pubsublite +module cloud.google.com/go/firestore +module cloud.google.com/go/logging +module cloud.google.com/go/internal/gapicgen +module cloud.google.com/go/internal/godocfx +module cloud.google.com/go/internal/examples/fake +module cloud.google.com/go/internal/examples/mock +module cloud.google.com/go/datastore +``` + +The `cloud.google.com/go` is the repository root module. Each other module is +a submodule. + +So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest +ancestor module is `cloud.google.com/go/bigtable` - so you should release a new +version of the `cloud.google.com/go/bigtable` submodule. + +If you need to release a change in `asset/apiv1/asset_client.go`, the closest +ancestor module is `cloud.google.com/go` - so you should release a new version +of the `cloud.google.com/go` repository root module. Note: releasing +`cloud.google.com/go` has no impact on any of the submodules, and vice-versa. +They are released entirely independently. + +## Test failures + +If there are any test failures in the Kokoro build, releases are blocked until +the failures have been resolved. + +## How to release + +### Automated Releases (`cloud.google.com/go` and submodules) + +We now use [release-please](https://github.com/googleapis/release-please) to +perform automated releases for `cloud.google.com/go` and all submodules. + +1. If there are changes that have not yet been released, a + [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will + be automatically opened by release-please + with a title like "chore: release X.Y.Z" (for the root module) or + "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z + is the next version to be released. Find the desired pull request + [here](https://github.com/googleapis/google-cloud-go/pulls) +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. (This applies even if the failures are in a different submodule + from the one being released.) +1. 
Review the release notes. These are automatically generated from the titles + of any merged commits since the previous release. If you would like to edit + them, this can be done by updating the changes in the release PR. +1. To cut a release, approve and merge the pull request. Doing so will + update the `CHANGES.md`, tag the merged commit with the appropriate version, + and draft a GitHub release which will copy the notes from `CHANGES.md`. + +### Manual Release (`cloud.google.com/go`) + +If for whatever reason the automated release process is not working as expected, +here is how to manually cut a release of `cloud.google.com/go`. + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. +1. Navigate to `google-cloud-go/` and switch to main. +1. `git pull` +1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. + The current latest tag `$CV` is the largest tag. It should look something + like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a + specific library, not the module root). We'll call the current version `$CV` + and the new version `$NV`. +1. On main, run `git log $CV...` to list all the changes since the last + release. NOTE: You must manually visually parse out changes to submodules [1] + (the `git log` is going to show you things in submodules, which are not going + to be part of your release). +1. Edit `CHANGES.md` to include a summary of the changes. +1. In `internal/version/version.go`, update `const Repo` to today's date with + the format `YYYYMMDD`. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, + and create a PR titled `chore: release $NV`. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to main. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `CHANGES.md`. + +### Manual Releases (submodules) + +If for whatever reason the automated release process is not working as expected, +here is how to manually cut a release of a submodule. + +(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. (This applies even if the failures are in a different submodule + from the one being released.) +1. Navigate to `google-cloud-go/` and switch to main. +1. `git pull` +1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all + existing releases. The current latest tag `$CV` is the largest tag. It + should look something like `datastore/vX.Y.Z`. We'll call the current version + `$CV` and the new version `$NV`. +1. On main, run `git log $CV.. -- datastore/` to list all the changes to the + submodule directory since the last release. +1. Edit `datastore/CHANGES.md` to include a summary of the changes. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. 
Push to your fork, + and create a PR titled `chore(datastore): release $NV`. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to main. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `datastore/CHANGES.md`. diff --git a/vendor/cloud.google.com/go/SECURITY.md b/vendor/cloud.google.com/go/SECURITY.md new file mode 100644 index 000000000..8b58ae9c0 --- /dev/null +++ b/vendor/cloud.google.com/go/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). + +The Google Security Team will respond within 5 working days of your report on g.co/vulnz. + +We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md new file mode 100644 index 000000000..7d6c9cc1b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -0,0 +1,291 @@ +# Changelog + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both 
present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + +## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) + + +### Bug Fixes + +* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) + + +### Features + +* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45)) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13) + + +### Bug Fixes + +* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07) + + +### Features + +* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b)) + +## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8)) +* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) 
(2024-07-22) + + +### Bug Fixes + +* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544) + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) + + +### Features + +* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942)) + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01) + + +### Bug Fixes + +* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c)) +* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) + + +### Bug Fixes + +* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) 
([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28) + + +### Features + +* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16) + + +### Bug Fixes + +* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15)) +* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159) +* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8)) +* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea)) + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09) + + +### Bug Fixes + +* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07) + + +### Features + +* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c)) + + +### Bug Fixes + +* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23) + + +### Features + +* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814) + + +### Bug Fixes + +* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19) + + +### Bug Fixes + +* 
**auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823) +* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15) + +### Breaking Changes + +In the below mentioned commits there were a few large breaking changes since the +last release of the module. + +1. The `Credentials` type has been moved to the root of the module as it is + becoming the core abstraction for the whole module. +2. Because of the above mentioned change many functions that previously + returned a `TokenProvider` now return `Credentials`. Similarly, these + functions have been renamed to be more specific. +3. Most places that used to take an optional `TokenProvider` now accept + `Credentials`. You can make a `Credentials` from a `TokenProvider` using the + constructor found in the `auth` package. +4. The `detect` package has been renamed to `credentials`. With this change some + function signatures were also updated for better readability. +5. Derivative auth flows like `impersonate` and `downscope` have been moved to + be under the new `credentials` package. + +Although these changes are disruptive we think that they are for the best of the +long-term health of the module. We do not expect any more large breaking changes +like these in future revisions, even before 1.0.0. This version will be the +first version of the auth library that our client libraries start to use and +depend on. 
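+
+For example, a minimal sketch of item 3 above (`staticTokenProvider` here is a
+hypothetical stand-in for whatever `TokenProvider` implementation you already
+have):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"cloud.google.com/go/auth"
+)
+
+// staticTokenProvider is a placeholder TokenProvider that always returns the
+// same token value.
+type staticTokenProvider struct{ value string }
+
+func (p staticTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	return &auth.Token{Value: p.value}, nil
+}
+
+func main() {
+	// Wrap the TokenProvider in a Credentials value (item 3 above).
+	creds := auth.NewCredentials(&auth.CredentialsOptions{
+		TokenProvider: staticTokenProvider{value: "example-token"},
+	})
+	tok, err := creds.Token(context.Background())
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(tok.Value)
+}
+```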
+ +### Features + +* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6)) +* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d)) +* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670) +* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501)) +* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac)) +* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d)) +* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) +* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b)) +* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd)) +* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824)) +* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9)) +* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10) + + +### Bug Fixes + +* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136) +* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **auth:** Update grpc-go to v1.59.0 
([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## 0.1.0 (2023-10-18) + + +### Features + +* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e)) +* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993)) +* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8)) +* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7)) +* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c)) +* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5)) +* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff)) +* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe)) +* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04)) +* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507)) +* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/cloud.google.com/go/auth/LICENSE diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md new file mode 100644 index 000000000..6fe4f0763 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/README.md @@ -0,0 +1,40 @@ +# Google Auth Library for Go + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## 
Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+  [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+  package.
+- To create an authenticated HTTP client please see examples in the
+  [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+  package.
+- To create an authenticated gRPC connection please see examples in the
+  [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+  package.
+- To create an ID token please see examples in the
+  [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+  package.
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
new file mode 100644
index 000000000..32fb058df
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -0,0 +1,614 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/jwt"
+)
+
+const (
+	// Parameter keys for AuthCodeURL method to support PKCE.
+	codeChallengeKey       = "code_challenge"
+	codeChallengeMethodKey = "code_challenge_method"
+
+	// Parameter key for Exchange method to support PKCE.
+	codeVerifierKey = "code_verifier"
+
+	// 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
+	// so we give it 15 seconds to refresh its cache before attempting to refresh a token.
+	defaultExpiryDelta = 225 * time.Second
+
+	universeDomainDefault = "googleapis.com"
+)
+
+// tokenState represents different states for a [Token].
+type tokenState int
+
+const (
+	// fresh indicates that the [Token] is valid. It is not expired or close to
+	// expired, or the token has no expiry.
+	fresh tokenState = iota
+	// stale indicates that the [Token] is close to expired, and should be
+	// refreshed. The token can be used normally.
+ stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} + + // for testing + timeNow = time.Now +) + +// TokenProvider specifies an interface for anything that can return a token. +type TokenProvider interface { + // Token returns a Token or an error. + // The Token returned must be safe to use + // concurrently. + // The returned Token must not be modified. + // The context provided must be sent along to any requests that are made in + // the implementing code. + Token(context.Context) (*Token, error) +} + +// Token holds the credential token used to authorized requests. All fields are +// considered read-only. +type Token struct { + // Value is the token used to authorize requests. It is usually an access + // token but may be other types of tokens such as ID tokens in some flows. + Value string + // Type is the type of token Value is. If uninitialized, it should be + // assumed to be a "Bearer" token. + Type string + // Expiry is the time the token is set to expire. + Expiry time.Time + // Metadata may include, but is not limited to, the body of the token + // response returned by the server. + Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values +} + +// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not +// expired. A token is considered expired if [Token.Expiry] has passed or will +// pass in the next 225 seconds. +func (t *Token) IsValid() bool { + return t.isValidWithEarlyExpiry(defaultExpiryDelta) +} + +// MetadataString is a convenience method for accessing string values in the +// token's metadata. Returns an empty string if the metadata is nil or the value +// for the given key cannot be cast to a string. +func (t *Token) MetadataString(k string) string { + if t.Metadata == nil { + return "" + } + s, ok := t.Metadata[k].(string) + if !ok { + return "" + } + return s +} + +func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { + if t.isEmpty() { + return false + } + if t.Expiry.IsZero() { + return true + } + return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) +} + +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + +// Credentials holds Google credentials, including +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + json []byte + projectID CredentialsPropertyProvider + quotaProjectID CredentialsPropertyProvider + // universeDomain is the default service domain for a given Cloud universe. + universeDomain CredentialsPropertyProvider + + TokenProvider +} + +// JSON returns the bytes associated with the the file used to source +// credentials if one was used. +func (c *Credentials) JSON() []byte { + return c.json +} + +// ProjectID returns the associated project ID from the underlying file or +// environment. 
+func (c *Credentials) ProjectID(ctx context.Context) (string, error) { + if c.projectID == nil { + return internal.GetProjectID(c.json, ""), nil + } + v, err := c.projectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetProjectID(c.json, v), nil +} + +// QuotaProjectID returns the associated quota project ID from the underlying +// file or environment. +func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) { + if c.quotaProjectID == nil { + return internal.GetQuotaProject(c.json, ""), nil + } + v, err := c.quotaProjectID.GetProperty(ctx) + if err != nil { + return "", err + } + return internal.GetQuotaProject(c.json, v), nil +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) { + if c.universeDomain == nil { + return universeDomainDefault, nil + } + v, err := c.universeDomain.GetProperty(ctx) + if err != nil { + return "", err + } + if v == "" { + return universeDomainDefault, nil + } + return v, err +} + +// CredentialsPropertyProvider provides an implementation to fetch a property +// value for [Credentials]. +type CredentialsPropertyProvider interface { + GetProperty(context.Context) (string, error) +} + +// CredentialsPropertyFunc is a type adapter to allow the use of ordinary +// functions as a [CredentialsPropertyProvider]. +type CredentialsPropertyFunc func(context.Context) (string, error) + +// GetProperty loads the properly value provided the given context. +func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) { + return p(ctx) +} + +// CredentialsOptions are used to configure [Credentials]. +type CredentialsOptions struct { + // TokenProvider is a means of sourcing a token for the credentials. Required. + TokenProvider TokenProvider + // JSON is the raw contents of the credentials file if sourced from a file. + JSON []byte + // ProjectIDProvider resolves the project ID associated with the + // credentials. + ProjectIDProvider CredentialsPropertyProvider + // QuotaProjectIDProvider resolves the quota project ID associated with the + // credentials. + QuotaProjectIDProvider CredentialsPropertyProvider + // UniverseDomainProvider resolves the universe domain with the credentials. + UniverseDomainProvider CredentialsPropertyProvider +} + +// NewCredentials returns new [Credentials] from the provided options. Most users +// will want to build this object a function from the +// [cloud.google.com/go/auth/credentials] package. +func NewCredentials(opts *CredentialsOptions) *Credentials { + creds := &Credentials{ + TokenProvider: opts.TokenProvider, + json: opts.JSON, + projectID: opts.ProjectIDProvider, + quotaProjectID: opts.QuotaProjectIDProvider, + universeDomain: opts.UniverseDomainProvider, + } + + return creds +} + +// CachedTokenProviderOptions provided options for configuring a +// CachedTokenProvider. +type CachedTokenProviderOptions struct { + // DisableAutoRefresh makes the TokenProvider always return the same token, + // even if it is expired. The default is false. Optional. + DisableAutoRefresh bool + // ExpireEarly configures the amount of time before a token expires, that it + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. + ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. 
+ DisableAsyncRefresh bool +} + +func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { + if ctpo == nil { + return true + } + return !ctpo.DisableAutoRefresh +} + +func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { + if ctpo == nil || ctpo.ExpireEarly == 0 { + return defaultExpiryDelta + } + return ctpo.ExpireEarly +} + +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + +// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire. The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. +func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { + if ctp, ok := tp.(*cachedTokenProvider); ok { + return ctp + } + return &cachedTokenProvider{ + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), + } +} + +type cachedTokenProvider struct { + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool + + mu sync.Mutex + cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool +} + +func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { + c.mu.Lock() + defer c.mu.Unlock() + t := c.cachedToken + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if timeNow().After(t.Expiry.Round(0)) { + return invalid + } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. 
+// If the refresh attempt fails, no further attempts are made until the refresh
+// window expires and the token enters the invalid state, at which point the
+// blocking call to Token should likely return the same error on the main goroutine.
+func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
+ fn := func() {
+ c.mu.Lock()
+ c.isRefreshRunning = true
+ c.mu.Unlock()
+ t, err := c.tp.Token(ctx)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshRunning = false
+ if err != nil {
+ // Discard errors from the non-blocking refresh, but prevent further
+ // attempts.
+ c.isRefreshErr = true
+ return
+ }
+ c.cachedToken = t
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if !c.isRefreshRunning && !c.isRefreshErr {
+ go fn()
+ }
+}
+
+func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshErr = false
+ if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) {
+ return c.cachedToken, nil
+ }
+ t, err := c.tp.Token(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.cachedToken = t
+ return t, nil
+}
+
+// Error is an error associated with retrieving a [Token]. It can hold useful
+// additional details for debugging.
+type Error struct {
+ // Response is the HTTP response associated with error. The body will always
+ // be already closed and consumed.
+ Response *http.Response
+ // Body is the HTTP response body.
+ Body []byte
+ // Err is the underlying wrapped error.
+ Err error
+
+ // code returned in the token response
+ code string
+ // description returned in the token response
+ description string
+ // uri returned in the token response
+ uri string
+}
+
+func (e *Error) Error() string {
+ if e.code != "" {
+ s := fmt.Sprintf("auth: %q", e.code)
+ if e.description != "" {
+ s += fmt.Sprintf(" %q", e.description)
+ }
+ if e.uri != "" {
+ s += fmt.Sprintf(" %q", e.uri)
+ }
+ return s
+ }
+ return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
+}
+
+// Temporary returns true if the error is considered temporary and may be able
+// to be retried.
+func (e *Error) Temporary() bool {
+ if e.Response == nil {
+ return false
+ }
+ sc := e.Response.StatusCode
+ return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
+}
+
+func (e *Error) Unwrap() error {
+ return e.Err
+}
+
+// Style describes how the token endpoint wants to receive the ClientID and
+// ClientSecret.
+type Style int
+
+const (
+ // StyleUnknown means the value has not been initiated. Sending this in
+ // a request will cause the token exchange to fail.
+ StyleUnknown Style = iota
+ // StyleInParams sends client info in the body of a POST request.
+ StyleInParams
+ // StyleInHeader sends client info using Basic Authorization header.
+ StyleInHeader
+)
+
+// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
+type Options2LO struct {
+ // Email is the OAuth2 client ID. This value is set as the "iss" in the
+ // JWT.
+ Email string
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. It is used to sign
+ // the JWT created.
+ PrivateKey []byte
+ // TokenURL is the URL the JWT is sent to. Required.
+ TokenURL string
+ // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
+ // "kid" in the JWT header. Optional.
+ PrivateKeyID string
+ // Subject is the user to impersonate. It is used as the "sub" in
+ // the JWT. Optional.
+ Subject string
+ // Scopes specifies requested permissions for the token. Optional.
+ Scopes []string
+ // Expires specifies the lifetime of the token. Optional.
+ Expires time.Duration
+ // Audience specifies the "aud" in the JWT. Optional.
+ Audience string
+ // PrivateClaims allows specifying any custom claims for the JWT. Optional.
+ PrivateClaims map[string]interface{}
+
+ // Client is the client to be used to make the underlying token requests.
+ // Optional.
+ Client *http.Client
+ // UseIDToken requests that the token returned be an ID token if one is
+ // returned from the server. Optional.
+ UseIDToken bool
+}
+
+func (o *Options2LO) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+func (o *Options2LO) validate() error {
+ if o == nil {
+ return errors.New("auth: options must be provided")
+ }
+ if o.Email == "" {
+ return errors.New("auth: email must be provided")
+ }
+ if len(o.PrivateKey) == 0 {
+ return errors.New("auth: private key must be provided")
+ }
+ if o.TokenURL == "" {
+ return errors.New("auth: token URL must be provided")
+ }
+ return nil
+}
+
+// New2LOTokenProvider returns a [TokenProvider] from the provided options.
+func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
+}
+
+type tokenProvider2LO struct {
+ opts *Options2LO
+ Client *http.Client
+}
+
+func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
+ pk, err := internal.ParseKey(tp.opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ claimSet := &jwt.Claims{
+ Iss: tp.opts.Email,
+ Scope: strings.Join(tp.opts.Scopes, " "),
+ Aud: tp.opts.TokenURL,
+ AdditionalClaims: tp.opts.PrivateClaims,
+ Sub: tp.opts.Subject,
+ }
+ if t := tp.opts.Expires; t > 0 {
+ claimSet.Exp = time.Now().Add(t).Unix()
+ }
+ if aud := tp.opts.Audience; aud != "" {
+ claimSet.Aud = aud
+ }
+ h := *defaultHeader
+ h.KeyID = tp.opts.PrivateKeyID
+ payload, err := jwt.EncodeJWS(&h, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ resp, body, err := internal.DoRequest(tp.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
+ }
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return nil, &Error{
+ Response: resp,
+ Body: body,
+ }
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("auth: cannot fetch token: %w", err) + } + token := &Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + token.Metadata = make(map[string]interface{}) + json.Unmarshal(body, &token.Metadata) // no error checks for optional fields + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jwt.DecodeJWS(v) + if err != nil { + return nil, fmt.Errorf("auth: error decoding JWT token: %w", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if tp.opts.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("auth: response doesn't have JWT token") + } + token.Value = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go new file mode 100644 index 000000000..6f70fa353 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -0,0 +1,86 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/compute/metadata" +) + +var ( + computeTokenMetadata = map[string]interface{}{ + "auth.google.tokenSource": "compute-metadata", + "auth.google.serviceAccount": "default", + } + computeTokenURI = "instance/service-accounts/default/token" +) + +// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that +// uses the metadata service to retrieve tokens. +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, + }) +} + +// computeProvider fetches tokens from the google cloud metadata service. 
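+// It requests the instance/service-accounts/default/token metadata path and
+// decodes a JSON body of roughly this shape (see metadataTokenResp below;
+// values are illustrative):
+//
+//	{"access_token": "...", "expires_in": 3599, "token_type": "Bearer"}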
+type computeProvider struct { + scopes []string +} + +type metadataTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` +} + +func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { + tokenURI, err := url.Parse(computeTokenURI) + if err != nil { + return nil, err + } + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI.RawQuery = v.Encode() + } + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + var res metadataTokenResp + if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil { + return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, errors.New("credentials: incomplete token received from metadata") + } + return &auth.Token{ + Value: res.AccessToken, + Type: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + Metadata: computeTokenMetadata, + }, nil + +} diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go new file mode 100644 index 000000000..010afc37c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -0,0 +1,262 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/compute/metadata" +) + +const ( + // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow. + jwtTokenURL = "https://oauth2.googleapis.com/token" + + // Google's OAuth 2.0 default endpoints. + googleAuthURL = "https://accounts.google.com/o/oauth2/auth" + googleTokenURL = "https://oauth2.googleapis.com/token" + + // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint. + GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + + // Help on default credentials + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +) + +var ( + // for testing + allowOnGCECheck = true +) + +// OnGCE reports whether this process is running in Google Cloud. +func OnGCE() bool { + // TODO(codyoss): once all libs use this auth lib move metadata check here + return allowOnGCECheck && metadata.OnGCE() +} + +// DetectDefault searches for "Application Default Credentials" and returns +// a credential based on the [DetectOptions] provided. +// +// It looks for credentials in the following places, preferring the first +// location found: +// +// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS +// environment variable. 
For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation +// on how to generate the JSON configuration file for on-prem/non-Google +// cloud platforms. +// - A JSON file in a location known to the gcloud command-line tool. On +// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On +// other systems, $HOME/.config/gcloud/application_default_credentials.json. +// - On Google Compute Engine, Google App Engine standard second generation +// runtimes, and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if len(opts.CredentialsJSON) > 0 { + return readCredentialsFileJSON(opts.CredentialsJSON, opts) + } + if opts.CredentialsFile != "" { + return readCredentialsFile(opts.CredentialsFile, opts) + } + if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err + } + return creds, nil + } + + fileName := credsfile.GetWellKnownFileName() + if b, err := os.ReadFile(fileName); err == nil { + return readCredentialsFileJSON(b, opts) + } + + if OnGCE() { + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + }), nil + } + + return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL) +} + +// DetectOptions provides configuration for [DetectDefault]. +type DetectOptions struct { + // Scopes that credentials tokens should have. Example: + // https://www.googleapis.com/auth/cloud-platform. Required if Audience is + // not provided. + Scopes []string + // Audience that credentials tokens should have. Only applicable for 2LO + // flows with service accounts. If specified, scopes should not be provided. + Audience string + // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + // EarlyTokenRefresh configures how early before a token expires that it + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. + EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool + // AuthHandlerOptions configures an authorization handler and other options + // for 3LO flows. It is required, and only used, for client credential + // flows. + AuthHandlerOptions *auth.AuthorizationHandlerOptions + // TokenURL allows to set the token endpoint for user credential flows. If + // unset the default value is: https://oauth2.googleapis.com/token. + // Optional. + TokenURL string + // STSAudience is the audience sent to when retrieving an STS token. + // Currently this only used for GDCH auth flow, for which it is required. 
+ STSAudience string + // CredentialsFile overrides detection logic and sources a credential file + // from the provided filepath. If provided, CredentialsJSON must not be. + // Optional. + CredentialsFile string + // CredentialsJSON overrides detection logic and uses the JSON bytes as the + // source for the credential. If provided, CredentialsFile must not be. + // Optional. + CredentialsJSON []byte + // UseSelfSignedJWT directs service account based credentials to create a + // self-signed JWT with the private key found in the file, skipping any + // network requests that would normally be made. Optional. + UseSelfSignedJWT bool + // Client configures the underlying client used to make network requests + // when fetching tokens. Optional. + Client *http.Client + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This option is ignored for + // authentication flows that do not support universe domain. Optional. + UniverseDomain string +} + +func (o *DetectOptions) validate() error { + if o == nil { + return errors.New("credentials: options must be provided") + } + if len(o.Scopes) > 0 && o.Audience != "" { + return errors.New("credentials: both scopes and audience were provided") + } + if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" { + return errors.New("credentials: both credentials file and JSON were provided") + } + return nil +} + +func (o *DetectOptions) tokenURL() string { + if o.TokenURL != "" { + return o.TokenURL + } + return googleTokenURL +} + +func (o *DetectOptions) scopes() []string { + scopes := make([]string, len(o.Scopes)) + copy(scopes, o.Scopes) + return scopes +} + +func (o *DetectOptions) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.DefaultClient() +} + +func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return readCredentialsFileJSON(b, opts) +} + +func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + // attempt to parse jsonData as a Google Developers Console client_credentials.json. 
+ config := clientCredConfigFromJSON(b, opts) + if config != nil { + if config.AuthHandlerOpts == nil { + return nil, errors.New("credentials: auth handler must be specified for this credential filetype") + } + tp, err := auth.New3LOTokenProvider(config) + if err != nil { + return nil, err + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: tp, + JSON: b, + }), nil + } + return fileCredentials(b, opts) +} + +func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { + var creds credsfile.ClientCredentialsFile + var c *credsfile.Config3LO + if err := json.Unmarshal(b, &creds); err != nil { + return nil + } + switch { + case creds.Web != nil: + c = creds.Web + case creds.Installed != nil: + c = creds.Installed + default: + return nil + } + if len(c.RedirectURIs) < 1 { + return nil + } + var handleOpts *auth.AuthorizationHandlerOptions + if opts.AuthHandlerOptions != nil { + handleOpts = &auth.AuthorizationHandlerOptions{ + Handler: opts.AuthHandlerOptions.Handler, + State: opts.AuthHandlerOptions.State, + PKCEOpts: opts.AuthHandlerOptions.PKCEOpts, + } + } + return &auth.Options3LO{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: opts.scopes(), + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + Client: opts.client(), + EarlyTokenExpiry: opts.EarlyTokenRefresh, + AuthHandlerOpts: handleOpts, + // TODO(codyoss): refactor this out. We need to add in auto-detection + // for this use case. + AuthStyle: auth.StyleInParams, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go new file mode 100644 index 000000000..1dbb2866b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/doc.go @@ -0,0 +1,45 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package credentials provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. It supports the Web server flow, +// client-side credentials, service accounts, Google Compute Engine service +// accounts, Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # Credentials +// +// The [cloud.google.com/go/auth.Credentials] type represents Google +// credentials, including Application Default Credentials. +// +// Use [DetectDefault] to obtain Application Default Credentials. 
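+//
+// For example, a minimal sketch (assuming a context.Context value ctx; the
+// scope shown is only an illustration):
+//
+//	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})
+//	if err != nil {
+//		// Handle the error: no default credentials were found.
+//	}
+//	tok, err := creds.Token(ctx)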
+// +// Application Default Credentials support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage, and +// store service account private keys locally. +// +// # Workforce Identity Federation +// +// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount]. +package credentials diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go new file mode 100644 index 000000000..6591b1811 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -0,0 +1,225 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "errors" + "fmt" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/externalaccount" + "cloud.google.com/go/auth/credentials/internal/externalaccountuser" + "cloud.google.com/go/auth/credentials/internal/gdch" + "cloud.google.com/go/auth/credentials/internal/impersonate" + internalauth "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { + fileType, err := credsfile.ParseFileType(b) + if err != nil { + return nil, err + } + + var projectID, universeDomain string + var tp auth.TokenProvider + switch fileType { + case credsfile.ServiceAccountKey: + f, err := credsfile.ParseServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.ProjectID + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.UserCredentialsKey: + f, err := credsfile.ParseUserCredentials(b) + if err != nil { + return nil, err + } + tp, err = handleUserCredential(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ExternalAccountKey: + f, err := credsfile.ParseExternalAccount(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.ExternalAccountAuthorizedUserKey: + f, err := credsfile.ParseExternalAccountAuthorizedUser(b) + if err != nil { + return nil, err + } + tp, err = handleExternalAccountAuthorizedUser(f, opts) + if err != nil { + return nil, err + } + universeDomain = f.UniverseDomain + case credsfile.ImpersonatedServiceAccountKey: + f, err := credsfile.ParseImpersonatedServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleImpersonatedServiceAccount(f, opts) + if err != nil { + return nil, err + } + universeDomain 
= resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + case credsfile.GDCHServiceAccountKey: + f, err := credsfile.ParseGDCHServiceAccount(b) + if err != nil { + return nil, err + } + tp, err = handleGDCHServiceAccount(f, opts) + if err != nil { + return nil, err + } + projectID = f.Project + universeDomain = f.UniverseDomain + default: + return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType) + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + }), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override + UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), + }), nil +} + +// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to +// support configuring universe-specific credentials in code. Auth flows +// unsupported for universe domain should not use this func, but should instead +// simply set the file universe domain on the credentials. +func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string { + if optsUniverseDomain != "" { + return optsUniverseDomain + } + return fileUniverseDomain +} + +func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) + if opts.UseSelfSignedJWT { + return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) + } + opts2LO := &auth.Options2LO{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: opts.scopes(), + TokenURL: f.TokenURL, + Subject: opts.Subject, + Client: opts.client(), + } + if opts2LO.TokenURL == "" { + opts2LO.TokenURL = jwtTokenURL + } + return auth.New2LOTokenProvider(opts2LO) +} + +func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) { + opts3LO := &auth.Options3LO{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + AuthURL: googleAuthURL, + TokenURL: opts.tokenURL(), + AuthStyle: auth.StyleInParams, + EarlyTokenExpiry: opts.EarlyTokenRefresh, + RefreshToken: f.RefreshToken, + Client: opts.client(), + } + return auth.New3LOTokenProvider(opts3LO) +} + +func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccount.Options{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: opts.scopes(), + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + Client: opts.client(), + IsDefaultClient: opts.Client == nil, + } + if f.ServiceAccountImpersonation != nil { + externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds + } + return externalaccount.NewTokenProvider(externalOpts) +} + +func handleExternalAccountAuthorizedUser(f 
*credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) { + externalOpts := &externalaccountuser.Options{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURL, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: opts.scopes(), + Client: opts.client(), + } + return externalaccountuser.NewTokenProvider(externalOpts) +} + +func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + tp, err := fileCredentials(f.CredSource, opts) + if err != nil { + return nil, err + } + return impersonate.NewTokenProvider(&impersonate.Options{ + URL: f.ServiceAccountImpersonationURL, + Scopes: opts.scopes(), + Tp: tp, + Delegates: f.Delegates, + Client: opts.client(), + }) +} + +func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + return gdch.NewTokenProvider(f, &gdch.Options{ + STSAudience: opts.STSAudience, + Client: opts.client(), + }) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go new file mode 100644 index 000000000..d8b5d4fde --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -0,0 +1,520 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +var ( + // getenv aliases os.Getenv for testing + getenv = os.Getenv +) + +const ( + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTTL = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // Supported AWS configuration environment variables. 
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnvVar = "AWS_REGION" + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" + awsProviderType = "aws" +) + +type awsSubjectProvider struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + CredVerificationURL string + IMDSv2SessionTokenURL string + TargetResource string + requestSigner *awsRequestSigner + region string + securityCredentialsProvider AwsSecurityCredentialsProvider + reqOpts *RequestOptions + + Client *http.Client +} + +func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { + // Set Defaults + if sp.RegionalCredVerificationURL == "" { + sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL + } + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if sp.TargetResource != "" { + req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource) + } + sp.requestSigner.signRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (sp *awsSubjectProvider) providerType() string { + if sp.securityCredentialsProvider != nil { + return programmaticProviderType + } + return awsProviderType +} + +func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) { + if sp.IMDSv2SessionTokenURL == "" { + return "", nil + } + req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) + } + return string(body), nil +} + +func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv(awsDefaultRegionEnvVar), nil + } + + if sp.RegionURL == "" { + return "", errors.New("credentials: unable to determine AWS region") + } + + req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
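+ // The trailing availability-zone letter is dropped below by trimming the
+ // final byte of the response (for example, "us-east-2b" becomes "us-east-2").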
+ bodyLen := len(body) + if bodyLen == 0 { + return "", nil + } + return string(body[:bodyLen-1]), nil +} + +func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) { + if sp.securityCredentialsProvider != nil { + return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts) + } + if canRetrieveSecurityCredentialFromEnvironment() { + return &AwsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyIDEnvVar), + SecretAccessKey: getenv(awsSecretAccessKeyEnvVar), + SessionToken: getenv(awsSessionTokenEnvVar), + }, nil + } + + roleName, err := sp.getMetadataRoleName(ctx, headers) + if err != nil { + return + } + credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("credentials: missing AccessKeyId credential") + } + if credentials.SecretAccessKey == "" { + return result, errors.New("credentials: missing SecretAccessKey credential") + } + + return credentials, nil +} + +func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) { + var result *AwsSecurityCredentials + + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil) + if err != nil { + return result, err + } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return result, err + } + if resp.StatusCode != http.StatusOK { + return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) + } + if err := json.Unmarshal(body, &result); err != nil { + return nil, err + } + return result, nil +} + +func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) { + if sp.CredVerificationURL == "" { + return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint") + } + req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil) + if err != nil { + return "", err + } + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) + } + return string(body), nil +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials *AwsSecurityCredentials +} + +// signRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. 
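+// Signing follows the AWS Signature Version 4 flow implemented below: build a
+// canonical request, hash it into a string to sign scoped to the date, region,
+// and service, and derive the signing key from the secret access key (see
+// generateAuthentication).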
+func (rs *awsRequestSigner) signRequest(req *http.Request) error { + // req is assumed non-nil + signedRequest := cloneRequest(req) + timestamp := Now() + signedRequest.Header.Set("host", requestHost(req)) + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/") + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n") + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. 
+ var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = internal.ReadAll(requestBody) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is +// required. +func canRetrieveRegionFromEnvironment() bool { + return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != "" +} + +// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. +func canRetrieveSecurityCredentialFromEnvironment() bool { + return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != "" +} + +func (sp *awsSubjectProvider) shouldUseMetadataServer() bool { + return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go new file mode 100644 index 000000000..d5765c474 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go @@ -0,0 +1,284 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "regexp" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +const ( + executableSupportedMaxVersion = 1 + executableDefaultTimeout = 30 * time.Second + executableSource = "response" + executableProviderType = "executable" + outputFileSource = "output file" + + allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" + idTokenType = "urn:ietf:params:oauth:token-type:id_token" + saml2TokenType = "urn:ietf:params:oauth:token-type:saml2" +) + +var ( + serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`) +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +// environment is a contract for testing +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) + cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError) + } + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableSubjectProvider struct { + Command string + Timeout time.Duration + OutputFile string + client *http.Client + opts *Options + env environment +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IDToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + // Validate + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + if result.Success == nil { + return "", missingFieldError(source, "success") + } + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + if result.ExpirationTime == 0 && 
sp.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + switch result.TokenType { + case jwtTokenType, idTokenType: + if result.IDToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IDToken, nil + case saml2TokenType: + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + default: + return "", tokenTypeError(source) + } +} + +func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) { + if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + return sp.getTokenFromExecutableCommand(ctx) +} + +func (sp *executableSubjectProvider) providerType() string { + return executableProviderType +} + +func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) { + if sp.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(sp.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := internal.ReadAll(file) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (sp *executableSubjectProvider) executableEnvironment() []string { + result := sp.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if sp.opts.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if sp.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile)) + } + return result +} + +func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. 
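+ // The variable is allowExecutablesEnvVar (GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES)
+ // and it must be set to exactly "1", as checked below.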
+ if sp.env.getenv(allowExecutablesEnvVar) != "1" { + return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") + } + + ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout)) + defer cancel() + + output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment()) + if err != nil { + return "", err + } + return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix()) +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("credentials: %q missing %q field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("credentials: unable to parse %q: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"credentials: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("credentials: %v contains unsupported token type", source) +} + +func exitCodeError(err *exec.ExitError) error { + return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err) +} + +func executableError(err error) error { + return fmt.Errorf("credentials: executable command failed: %w", err) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go new file mode 100644 index 000000000..112186a9e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -0,0 +1,407 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package externalaccount + +import ( + "context" + "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/impersonate" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +var ( + // Now aliases time.Now for testing + Now = func() time.Time { + return time.Now().UTC() + } + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +// Options stores the configuration for fetching tokens with external credentials. +type Options struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec + // e.g. `urn:ietf:params:oauth:token-type:jwt`. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using client_id as username and client_secret as password. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. + CredentialSource *credsfile.CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project which overrides the project associated with the credentials. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. + Scopes []string + // WorkforcePoolUserProject should be set when it is a workforce pool and + // not a workload identity pool. The underlying principal must still have + // serviceusage.services.use IAM permission to use the project for + // billing/quota. Optional. + WorkforcePoolUserProject string + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. + UniverseDomain string + // SubjectTokenProvider is an optional token provider for OIDC/SAML + // credentials. 
One of SubjectTokenProvider, AWSSecurityCredentialProvider + // or CredentialSource must be provided. Optional. + SubjectTokenProvider SubjectTokenProvider + // AwsSecurityCredentialsProvider is an AWS Security Credential provider + // for AWS credentials. One of SubjectTokenProvider, + // AWSSecurityCredentialProvider or CredentialSource must be provided. Optional. + AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider + // Client for token request. + Client *http.Client + // IsDefaultClient marks whether the client passed in is a default client that can be overriden. + // This is important for X509 credentials which should create a new client if the default was used + // but should respect a client explicitly passed in by the user. + IsDefaultClient bool +} + +// SubjectTokenProvider can be used to supply a subject token to exchange for a +// GCP access token. +type SubjectTokenProvider interface { + // SubjectToken should return a valid subject token or an error. + // The external account token provider does not cache the returned subject + // token, so caching logic should be implemented in the provider to prevent + // multiple requests for the same subject token. + SubjectToken(ctx context.Context, opts *RequestOptions) (string, error) +} + +// RequestOptions contains information about the requested subject token or AWS +// security credentials from the Google external account credential. +type RequestOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // Subject token type is the requested subject token type for the external + // account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials +// and an AWS Region to exchange for a GCP access token. +type AwsSecurityCredentialsProvider interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, opts *RequestOptions) (string, error) + // GetAwsSecurityCredentials should return a valid set of + // AwsSecurityCredentials or an error. The external account token provider + // does not cache the returned security credentials, so caching logic should + // be implemented in the provider to prevent multiple requests for the + // same security credentials. + AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error) +} + +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. + SecretAccessKey string `json:"SecretAccessKey"` + // SessionToken is the AWS Session token. This should be provided for + // temporary AWS security credentials - Optional. 
+ SessionToken string `json:"Token"` +} + +func (o *Options) validate() error { + if o.Audience == "" { + return fmt.Errorf("externalaccount: Audience must be set") + } + if o.SubjectTokenType == "" { + return fmt.Errorf("externalaccount: Subject token type must be set") + } + if o.WorkforcePoolUserProject != "" { + if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid { + return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials") + } + } + count := 0 + if o.CredentialSource != nil { + count++ + } + if o.SubjectTokenProvider != nil { + count++ + } + if o.AwsSecurityCredentialsProvider != nil { + count++ + } + if count == 0 { + return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + if count > 1 { + return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set") + } + return nil +} + +// client returns the http client that should be used for the token exchange. If a non-default client +// is provided, then the client configured in the options will always be returned. If a default client +// is provided and the options are configured for X509 credentials, a new client will be created. +func (o *Options) client() (*http.Client, error) { + // If a client was provided and no override certificate config location was provided, use the provided client. + if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") { + return o.Client, nil + } + + // If a new client should be created, validate and use the certificate source to create a new mTLS client. + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return createX509Client(cert.CertificateConfigLocation) +} + +// resolveTokenURL sets the default STS token endpoint with the configured +// universe domain. +func (o *Options) resolveTokenURL() { + if o.TokenURL != "" { + return + } else if o.UniverseDomain != "" { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1) + } else { + o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. 
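+//
+// Illustrative sketch (not upstream documentation): the values below are
+// placeholders showing how callers inside this module might configure a
+// file-sourced credential.
+//
+//	ctx := context.Background()
+//	tp, err := NewTokenProvider(&Options{
+//		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
+//		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
+//		CredentialSource: &credsfile.CredentialSource{File: "/path/to/subject/token"},
+//		Client:           &http.Client{},
+//	})
+//	if err != nil {
+//		// handle invalid configuration
+//	}
+//	tok, err := tp.Token(ctx)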
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + opts.resolveTokenURL() + stp, err := newSubjectTokenProvider(opts) + if err != nil { + return nil, err + } + + client, err := opts.client() + if err != nil { + return nil, err + } + + tp := &tokenProvider{ + client: client, + opts: opts, + stp: stp, + } + + if opts.ServiceAccountImpersonationURL == "" { + return auth.NewCachedTokenProvider(tp, nil), nil + } + + scopes := make([]string, len(opts.Scopes)) + copy(scopes, opts.Scopes) + // needed for impersonation + tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp, err := impersonate.NewTokenProvider(&impersonate.Options{ + Client: client, + URL: opts.ServiceAccountImpersonationURL, + Scopes: scopes, + Tp: auth.NewCachedTokenProvider(tp, nil), + TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + }) + if err != nil { + return nil, err + } + return auth.NewCachedTokenProvider(imp, nil), nil +} + +type subjectTokenProvider interface { + subjectToken(ctx context.Context) (string, error) + providerType() string +} + +// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. +type tokenProvider struct { + client *http.Client + opts *Options + stp subjectTokenProvider +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + subjectToken, err := tp.stp.subjectToken(ctx) + if err != nil { + return nil, err + } + + stsRequest := &stsexchange.TokenRequest{ + GrantType: stsexchange.GrantType, + Audience: tp.opts.Audience, + Scope: tp.opts.Scopes, + RequestedTokenType: stsexchange.TokenType, + SubjectToken: subjectToken, + SubjectTokenType: tp.opts.SubjectTokenType, + } + header := make(http.Header) + header.Set("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: tp.opts.ClientID, + ClientSecret: tp.opts.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" { + options = map[string]interface{}{ + "userProject": tp.opts.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{ + Client: tp.client, + Endpoint: tp.opts.TokenURL, + Request: stsRequest, + Authentication: clientAuth, + Headers: header, + ExtraOpts: options, + }) + if err != nil { + return nil, err + } + + tok := &auth.Token{ + Value: stsResp.AccessToken, + Type: stsResp.TokenType, + } + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. 
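+ // Because of that ambiguity, a zero or negative expires_in is rejected here
+ // rather than being treated as a token that never expires.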
+ if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("credentials: got invalid expiry from security token service") + } + tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + return tok, nil +} + +// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a +// subjectTokenProvider +func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} + if o.AwsSecurityCredentialsProvider != nil { + return &awsSubjectProvider{ + securityCredentialsProvider: o.AwsSecurityCredentialsProvider, + TargetResource: o.Audience, + reqOpts: reqOpts, + }, nil + } else if o.SubjectTokenProvider != nil { + return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil + } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion) + } + + awsProvider := &awsSubjectProvider{ + EnvironmentID: o.CredentialSource.EnvironmentID, + RegionURL: o.CredentialSource.RegionURL, + RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: o.CredentialSource.URL, + TargetResource: o.Audience, + Client: o.Client, + } + if o.CredentialSource.IMDSv2SessionTokenURL != "" { + awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL + } + + return awsProvider, nil + } + } else if o.CredentialSource.File != "" { + return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil + } else if o.CredentialSource.URL != "" { + return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + } else if o.CredentialSource.Executable != nil { + ec := o.CredentialSource.Executable + if ec.Command == "" { + return nil, errors.New("credentials: missing `command` field — executable command must be provided") + } + + execProvider := &executableSubjectProvider{} + execProvider.Command = ec.Command + if ec.TimeoutMillis == 0 { + execProvider.Timeout = executableDefaultTimeout + } else { + execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond + if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum { + return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds()) + } + } + execProvider.OutputFile = ec.OutputFile + execProvider.client = o.Client + execProvider.opts = o + execProvider.env = runtimeEnvironment{} + return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return &x509Provider{}, nil + } + return nil, errors.New("credentials: unable to 
parse credential source") +} + +func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + p.providerType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go new file mode 100644 index 000000000..8186939fe --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileProviderType = "file" +) + +type fileSubjectProvider struct { + File string + Format *credsfile.Format +} + +func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) { + tokenFile, err := os.Open(sp.File) + if err != nil { + return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err) + } + defer tokenFile.Close() + tokenBytes, err := internal.ReadAll(tokenFile) + if err != nil { + return "", fmt.Errorf("credentials: failed to read credential file: %w", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + + if sp.Format == nil { + return string(tokenBytes), nil + } + switch sp.Format.Type { + case fileTypeJSON: + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(tokenBytes), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *fileSubjectProvider) providerType() string { + return fileProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go new file mode 100644 index 000000000..8e4b4379b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go @@ -0,0 +1,74 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go new file mode 100644 index 000000000..be3c87351 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package externalaccount + +import "context" + +type programmaticProvider struct { + opts *RequestOptions + stp SubjectTokenProvider +} + +func (pp *programmaticProvider) providerType() string { + return programmaticProviderType +} + +func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) { + return pp.stp.SubjectToken(ctx, pp.opts) +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go new file mode 100644 index 000000000..0a020599e --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -0,0 +1,88 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" +) + +const ( + fileTypeText = "text" + fileTypeJSON = "json" + urlProviderType = "url" + programmaticProviderType = "programmatic" + x509ProviderType = "x509" +) + +type urlSubjectProvider struct { + URL string + Headers map[string]string + Format *credsfile.Format + Client *http.Client +} + +func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil) + if err != nil { + return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err) + } + + for key, val := range sp.Headers { + req.Header.Add(key, val) + } + resp, body, err := internal.DoRequest(sp.Client, req) + if err != nil { + return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return "", fmt.Errorf("credentials: status code %d: %s", c, body) + } + + if sp.Format == nil { + return string(body), nil + } + switch sp.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(body, &jsonData) + if err != nil { + return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err) + } + val, ok := jsonData[sp.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("credentials: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("credentials: improperly formatted subject token") + } + return token, nil + case fileTypeText: + return string(body), nil + default: + return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type) + } +} + +func (sp *urlSubjectProvider) providerType() string { + return urlProviderType +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 000000000..115df5881 
--- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. +func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. + client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go new file mode 100644 index 000000000..0d7885479 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -0,0 +1,110 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccountuser + +import ( + "context" + "errors" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials/internal/stsexchange" + "cloud.google.com/go/auth/internal" +) + +// Options stores the configuration for fetching tokens with external authorized +// user credentials. 
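+//
+// Note (not upstream documentation): as enforced by validate below, ClientID,
+// ClientSecret, RefreshToken, and TokenURL must all be set for
+// NewTokenProvider to accept the configuration.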
+type Options struct { + // Audience is the Secure Token Service (STS) audience which contains the + // resource name for the workforce pool and the provider identifier in that + // pool. + Audience string + // RefreshToken is the OAuth 2.0 refresh token. + RefreshToken string + // TokenURL is the STS token exchange endpoint for refresh. + TokenURL string + // TokenInfoURL is the STS endpoint URL for token introspection. Optional. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described + // below. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs + // to be called with the generated a cloud access token. When provided, STS + // will be called with additional basic authentication using client_id as + // username and client_secret as password. + ClientSecret string + // Scopes contains the desired scopes for the returned access token. + Scopes []string + + // Client for token request. + Client *http.Client +} + +func (c *Options) validate() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] +// configured with the provided options. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if !opts.validate() { + return nil, errors.New("credentials: invalid external_account_authorized_user configuration") + } + + tp := &tokenProvider{ + o: opts, + } + return auth.NewCachedTokenProvider(tp, nil), nil +} + +type tokenProvider struct { + o *Options +} + +func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { + opts := tp.o + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: auth.StyleInHeader, + ClientID: opts.ClientID, + ClientSecret: opts.ClientSecret, + } + headers := make(http.Header) + headers.Set("Content-Type", "application/x-www-form-urlencoded") + stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{ + Client: opts.Client, + Endpoint: opts.TokenURL, + RefreshToken: opts.RefreshToken, + Authentication: clientAuth, + Headers: headers, + }) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("credentials: invalid expiry from security token service") + } + + // guarded by the wrapping with CachedTokenProvider + if stsResponse.RefreshToken != "" { + opts.RefreshToken = stsResponse.RefreshToken + } + return &auth.Token{ + Value: stsResponse.AccessToken, + Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + Type: internal.TokenTypeBearer, + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go new file mode 100644 index 000000000..720045d3b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -0,0 +1,184 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gdch + +import ( + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +const ( + // GrantType is the grant type for the token request. + GrantType = "urn:ietf:params:oauth:token-type:token-exchange" + requestTokenType = "urn:ietf:params:oauth:token-type:access_token" + subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount" +) + +var ( + gdchSupportFormatVersions map[string]bool = map[string]bool{ + "1": true, + } +) + +// Options for [NewTokenProvider]. +type Options struct { + STSAudience string + Client *http.Client +} + +// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a +// GDCH cred file. +func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) { + if !gdchSupportFormatVersions[f.FormatVersion] { + return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion) + } + if o.STSAudience == "" { + return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") + } + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, err + } + certPool, err := loadCertPool(f.CertPath) + if err != nil { + return nil, err + } + + tp := gdchProvider{ + serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), + tokenURL: f.TokenURL, + aud: o.STSAudience, + pk: pk, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, + } + return tp, nil +} + +func loadCertPool(path string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + pem, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read certificate: %w", err) + } + pool.AppendCertsFromPEM(pem) + return pool, nil +} + +type gdchProvider struct { + serviceIdentity string + tokenURL string + aud string + pk *rsa.PrivateKey + pkID string + certPool *x509.CertPool + + client *http.Client +} + +func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { + addCertToTransport(g.client, g.certPool) + iat := time.Now() + exp := iat.Add(time.Hour) + claims := jwt.Claims{ + Iss: g.serviceIdentity, + Sub: g.serviceIdentity, + Aud: g.tokenURL, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(g.pkID), + } + payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", GrantType) + v.Set("audience", g.aud) + v.Set("requested_token_type", requestTokenType) + v.Set("subject_token", payload) + v.Set("subject_token_type", subjectTokenType) + + req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, body, err := internal.DoRequest(g.client, req) + if err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, &auth.Error{ + Response: resp, + Body: body, + } + } + + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now 
+ } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) + } + token := &auth.Token{ + Value: tokenRes.AccessToken, + Type: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token.Metadata = raw + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + return token, nil +} + +// addCertToTransport makes a best effort attempt at adding in the cert info to +// the client. It tries to keep all configured transport settings if the +// underlying transport is an http.Transport. Or else it overwrites the +// transport with defaults adding in the certs. +func addCertToTransport(hc *http.Client, certPool *x509.CertPool) { + trans, ok := hc.Transport.(*http.Transport) + if !ok { + trans = http.DefaultTransport.(*http.Transport).Clone() + } + trans.TLSClientConfig = &tls.Config{ + RootCAs: certPool, + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go new file mode 100644 index 000000000..ed53afa51 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -0,0 +1,146 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + defaultTokenLifetime = "3600s" + authHeaderKey = "Authorization" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// NewTokenProvider uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + return opts, nil +} + +// Options for [NewTokenProvider]. +type Options struct { + // Tp is the source credential used to generate a token on the + // impersonated service account. Required. + Tp auth.TokenProvider + + // URL is the endpoint to call to generate a token + // on behalf of the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. 
+ Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. Defaults to 1 hour if unset. Optional. + TokenLifetimeSeconds int + // Client configures the underlying client used to make network requests + // when fetching tokens. Required. + Client *http.Client +} + +func (o *Options) validate() error { + if o.Tp == nil { + return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials") + } + if o.URL == "" { + return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials") + } + return nil +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + lifetime := defaultTokenLifetime + if o.TokenLifetimeSeconds != 0 { + lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetime, + Scope: o.Scopes, + Delegates: o.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("credentials: unable to marshal request: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + if err := setAuthHeader(ctx, o.Tp, req); err != nil { + return nil, err + } + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("credentials: unable to parse response: %w", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err) + } + return &auth.Token{ + Value: accessTokenResp.AccessToken, + Expiry: expiry, + Type: internal.TokenTypeBearer, + }, nil +} + +func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error { + t, err := tp.Token(ctx) + if err != nil { + return err + } + typ := t.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + r.Header.Set(authHeaderKey, typ+" "+t.Value) + return nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go new file mode 100644 index 000000000..768a9dafc --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -0,0 +1,161 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stsexchange + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" +) + +const ( + // GrantType for a sts exchange. + GrantType = "urn:ietf:params:oauth:grant-type:token-exchange" + // TokenType for a sts exchange. + TokenType = "urn:ietf:params:oauth:token-type:access_token" + + jwtTokenType = "urn:ietf:params:oauth:token-type:jwt" +) + +// Options stores the configuration for making an sts exchange request. +type Options struct { + Client *http.Client + Endpoint string + Request *TokenRequest + Authentication ClientAuthentication + Headers http.Header + // ExtraOpts are optional fields marshalled into the `options` field of the + // request body. + ExtraOpts map[string]interface{} + RefreshToken string +} + +// RefreshAccessToken performs the token exchange using a refresh token flow. +func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", opts.RefreshToken) + return doRequest(ctx, opts, data) +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { + data := url.Values{} + data.Set("audience", opts.Request.Audience) + data.Set("grant_type", GrantType) + data.Set("requested_token_type", TokenType) + data.Set("subject_token_type", opts.Request.SubjectTokenType) + data.Set("subject_token", opts.Request.SubjectToken) + data.Set("scope", strings.Join(opts.Request.Scope, " ")) + if opts.ExtraOpts != nil { + opts, err := json.Marshal(opts.ExtraOpts) + if err != nil { + return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err) + } + data.Set("options", string(opts)) + } + return doRequest(ctx, opts, data) +} + +func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { + opts.Authentication.InjectAuthentication(data, opts.Headers) + encodedData := data.Encode() + + req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err) + + } + for key, list := range opts.Headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + + resp, body, err := internal.DoRequest(opts.Client, req) + if err != nil { + return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) + } + if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { + return nil, fmt.Errorf("credentials: status code %d: %s", c, body) + } + var stsResp TokenResponse + if err := json.Unmarshal(body, &stsResp); err != nil { + return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err) + } + + return &stsResp, nil +} + +// TokenRequest contains fields necessary to make an oauth2 token +// exchange. +type TokenRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// TokenResponse is used to decode the remote server response during +// an oauth2 token exchange. 
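+//
+// An illustrative, non-normative response body that decodes into this struct:
+//
+//	{
+//	  "access_token": "ya29.example-token",
+//	  "issued_token_type": "urn:ietf:params:oauth:token-type:access_token",
+//	  "token_type": "Bearer",
+//	  "expires_in": 3600,
+//	  "scope": "https://www.googleapis.com/auth/cloud-platform"
+//	}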
+type TokenResponse struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// ClientAuthentication represents an OAuth client ID and secret and the +// mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + AuthStyle auth.Style + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service +// exchange request. It modifies either the passed url.Values or http.Header +// depending on the desired authentication format. +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + switch c.AuthStyle { + case auth.StyleInHeader: + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go new file mode 100644 index 000000000..6ae29de6c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -0,0 +1,85 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credentials + +import ( + "context" + "crypto/rsa" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/credsfile" + "cloud.google.com/go/auth/internal/jwt" +) + +var ( + // for testing + now func() time.Time = time.Now +) + +// configureSelfSignedJWT uses the private key in the service account to create +// a JWT without making a network call. 
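+//
+// As a rough sketch (placeholder values, scope-based flow rather than
+// audience-based), the resulting claim set looks like:
+//
+//	{
+//	  "iss": "sa@example-project.iam.gserviceaccount.com",
+//	  "sub": "sa@example-project.iam.gserviceaccount.com",
+//	  "scope": "https://www.googleapis.com/auth/cloud-platform",
+//	  "iat": 1700000000,
+//	  "exp": 1700003600
+//	}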
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } + pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("credentials: could not parse key: %w", err) + } + return &selfSignedTokenProvider{ + email: f.ClientEmail, + audience: opts.Audience, + scopes: opts.scopes(), + pk: pk, + pkID: f.PrivateKeyID, + }, nil +} + +type selfSignedTokenProvider struct { + email string + audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { + iat := now() + exp := iat.Add(time.Hour) + scope := strings.Join(tp.scopes, " ") + c := &jwt.Claims{ + Iss: tp.email, + Sub: tp.email, + Aud: tp.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + h := &jwt.Header{ + Algorithm: jwt.HeaderAlgRSA256, + Type: jwt.HeaderType, + KeyID: string(tp.pkID), + } + msg, err := jwt.EncodeJWS(h, c, tp.pk) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go new file mode 100644 index 000000000..e61360805 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go @@ -0,0 +1,62 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package grpctransport + +import ( + "context" + "net" + "syscall" + + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. 
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go new file mode 100644 index 000000000..8696df148 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -0,0 +1,126 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "net" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal/compute" + "google.golang.org/grpc" + grpcgoogle "google.golang.org/grpc/credentials/google" +) + +func isDirectPathEnabled(endpoint string, opts *Options) bool { + if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { + return false + } + if !checkDirectPathEndPoint(endpoint) { + return false + } + if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b { + return false + } + return true +} + +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + if endpoint == "" { + return false + } + + return true +} + +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { + if tp == nil { + return false + } + tok, err := tp.Token(context.Background()) + if err != nil { + return false + } + if tok == nil { + return false + } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { + return false + } + if tok.MetadataString("auth.google.serviceAccount") != "default" { + return false + } + return true +} + +func isDirectPathXdsUsed(o *Options) bool { + // Method 1: Enable DirectPath xDS by env; + if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds { + return true + } + return false +} + +// configureDirectPath returns some dial options and an endpoint to use if the +// configuration allows the use of direct path. 
If it does not the provided +// grpcOpts and endpoint are returned. +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + if timeoutDialerOption != nil { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + // Check if google-c2p resolver is enabled for DirectPath + if isDirectPathXdsUsed(opts) { + // google-c2p resolver target must not have a port number + if addr, _, err := net.SplitHostPort(endpoint); err == nil { + endpoint = "google-c2p:///" + addr + } else { + endpoint = "google-c2p:///" + endpoint + } + } else { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint + } + grpcOpts = append(grpcOpts, + // For now all DirectPath go clients will be using the following lb config, but in future + // when different services need different configs, then we should change this to a + // per-service config. + grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) + } + // TODO: add support for system parameters (quota project, request reason) via chained interceptor. + } + return grpcOpts, endpoint +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go new file mode 100644 index 000000000..9d959af74 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -0,0 +1,400 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. +package grpctransport + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "os" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "go.opencensus.io/plugin/ocgrpc" + "google.golang.org/grpc" + grpccreds "google.golang.org/grpc/credentials" + grpcinsecure "google.golang.org/grpc/credentials/insecure" +) + +const ( + // Check env to disable DirectPath traffic. + disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" + + // Check env to decide if using google-c2p resolver for DirectPath traffic. + enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" + + quotaProjectHeaderKey = "X-goog-user-project" +) + +var ( + // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. 
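+ // On platforms where dial_socketopt.go is not compiled in (it is
+ // build-tagged linux-only), this remains nil and configureDirectPath adds
+ // no extra dialer option.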
+ timeoutDialerOption grpc.DialOption +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [GRPCClientConnPool] from [Dial]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenTelemetry). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // Metadata is extra gRPC metadata that will be appended to every outgoing + // request. + Metadata map[string]string + // GRPCDialOpts are dial options that will be passed to `grpc.Dial` when + // establishing a`grpc.Conn`` + GRPCDialOpts []grpc.DialOption + // PoolSize is specifies how many connections to balance between when making + // requests. If unset or less than 1, the value defaults to 1. + PoolSize int + // Credentials used to add Authorization metadata to all requests. If set + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detect Application Default + // Credentials. + DetectOpts *credentials.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +// client returns the client a user set for the detect options or nil if one was +// not set. 
+func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("grpctransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +func (o *Options) resolveDetectOptions() *credentials.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled user provided an aud, allow self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = credentials.GoogleMTLSTokenURL + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableNonDefaultSAForDirectPath overrides the default requirement for + // using the default service account for DirectPath. + EnableNonDefaultSAForDirectPath bool + // EnableDirectPath overrides the default attempt to use DirectPath. + EnableDirectPath bool + // EnableDirectPathXds overrides the default DirectPath type. It is only + // valid when DirectPath is enabled. + EnableDirectPathXds bool + // EnableJWTWithScope specifies if scope can be used with self-signed JWT. + EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. + DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies + // the default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. It should only be used + // internally for clients that needs more control over their transport. + SkipValidation bool +} + +// Dial returns a GRPCClientConnPool that can be used to communicate with a +// Google cloud service, configured with the provided [Options]. It +// automatically appends Authorization metadata to all outgoing requests. 
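+//
+// Illustrative sketch (not upstream documentation; the endpoint and scopes
+// below are placeholders):
+//
+//	pool, err := grpctransport.Dial(ctx, true, &grpctransport.Options{
+//		Endpoint: "example.googleapis.com:443",
+//		DetectOpts: &credentials.DetectOptions{
+//			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer pool.Close()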
+func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.PoolSize <= 1 { + conn, err := dial(ctx, secure, opts) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + pool := &roundRobinConnPool{} + for i := 0; i < opts.PoolSize; i++ { + conn, err := dial(ctx, secure, opts) + if err != nil { + // ignore close error, if any + defer pool.Close() + return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +// return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1 +func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) { + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + tOpts.EnableDirectPath = io.EnableDirectPath + tOpts.EnableDirectPathXds = io.EnableDirectPathXds + } + transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts) + if err != nil { + return nil, err + } + + if !secure { + transportCreds = grpcinsecure.NewCredentials() + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport. + opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts) + if err != nil { + return nil, err + } + + if opts.APIKey != "" { + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcKeyProvider{ + apiKey: opts.APIKey, + metadata: opts.Metadata, + secure: secure, + }), + ) + } else if !opts.DisableAuthentication { + metadata := opts.Metadata + + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + + qp, err := creds.QuotaProjectID(ctx) + if err != nil { + return nil, err + } + if qp != "" { + if metadata == nil { + metadata = make(map[string]string, 1) + } + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } + } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ + creds: creds, + metadata: metadata, + clientUniverseDomain: opts.UniverseDomain, + }), + ) + + // Attempt Direct Path + grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds) + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) + + return grpc.NewClient(endpoint, grpcOpts...) +} + +// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. 
+type grpcKeyProvider struct { + apiKey string + metadata map[string]string + secure bool +} + +func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + metadata := make(map[string]string, len(g.metadata)+1) + metadata["X-goog-api-key"] = g.apiKey + for k, v := range g.metadata { + metadata[k] = v + } + return metadata, nil +} + +func (g *grpcKeyProvider) RequireTransportSecurity() bool { + return g.secure +} + +// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. +type grpcCredentialsProvider struct { + creds *auth.Credentials + + secure bool + + // Additional metadata attached as headers. + metadata map[string]string + clientUniverseDomain string +} + +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. +func (c *grpcCredentialsProvider) getClientUniverseDomain() string { + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD + } + return internal.DefaultUniverseDomain +} + +func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := c.creds.Token(ctx) + if err != nil { + return nil, err + } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } + if c.secure { + ri, _ := grpccreds.RequestInfoFromContext(ctx) + if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err) + } + } + metadata := make(map[string]string, len(c.metadata)+1) + setAuthMetadata(token, metadata) + for k, v := range c.metadata { + metadata[k] = v + } + return metadata, nil +} + +// setAuthMetadata uses the provided token to set the Authorization metadata. +// If the token.Type is empty, the type is assumed to be Bearer. +func setAuthMetadata(token *auth.Token, m map[string]string) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + m["authorization"] = typ + " " + token.Value +} + +func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { + return c.secure +} + +func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/pool.go b/vendor/cloud.google.com/go/auth/grpctransport/pool.go new file mode 100644 index 000000000..642679f9b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/grpctransport/pool.go @@ -0,0 +1,119 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpctransport + +import ( + "context" + "fmt" + "sync/atomic" + + "google.golang.org/grpc" +) + +// GRPCClientConnPool is an interface that satisfies +// [google.golang.org/grpc.ClientConnInterface] and has some utility functions +// that are needed for connection lifecycle when using in a client library. It +// may be a pool or a single connection. This interface is not intended to, and +// can't be, implemented by others. +type GRPCClientConnPool interface { + // Connection returns a [google.golang.org/grpc.ClientConn] from the pool. + // + // ClientConn aren't returned to the pool and should not be closed directly. + Connection() *grpc.ClientConn + + // Len returns the number of connections in the pool. It will always return + // the same value. + Len() int + + // Close closes every ClientConn in the pool. The error returned by Close + // may be a single error or multiple errors. + Close() error + + grpc.ClientConnInterface + + // private ensure others outside this package can't implement this type + private() +} + +// singleConnPool is a special case for a single connection. +type singleConnPool struct { + *grpc.ClientConn +} + +func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn } +func (p *singleConnPool) Len() int { return 1 } +func (p *singleConnPool) private() {} + +type roundRobinConnPool struct { + conns []*grpc.ClientConn + + idx uint32 // access via sync/atomic +} + +func (p *roundRobinConnPool) Len() int { + return len(p.conns) +} + +func (p *roundRobinConnPool) Connection() *grpc.ClientConn { + i := atomic.AddUint32(&p.idx, 1) + return p.conns[i%uint32(len(p.conns))] +} + +func (p *roundRobinConnPool) Close() error { + var errs multiError + for _, conn := range p.conns { + if err := conn.Close(); err != nil { + errs = append(errs, err) + } + } + if len(errs) == 0 { + return nil + } + return errs +} + +func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { + return p.Connection().Invoke(ctx, method, args, reply, opts...) +} + +func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return p.Connection().NewStream(ctx, desc, method, opts...) +} + +func (p *roundRobinConnPool) private() {} + +// multiError represents errors from multiple conns in the group. 
+type multiError []error + +func (m multiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go new file mode 100644 index 000000000..30fedf956 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -0,0 +1,226 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. +package httptransport + +import ( + "crypto/tls" + "errors" + "fmt" + "net/http" + + "cloud.google.com/go/auth" + detect "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" +) + +// ClientCertProvider is a function that returns a TLS client certificate to be +// used when opening TLS connections. It follows the same semantics as +// [crypto/tls.Config.GetClientCertificate]. +type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// Options used to configure a [net/http.Client] from [NewClient]. +type Options struct { + // DisableTelemetry disables default telemetry (OpenTelemetry). An example + // reason to do so would be to bind custom telemetry that overrides the + // defaults. + DisableTelemetry bool + // DisableAuthentication specifies that no authentication should be used. It + // is suitable only for testing and for accessing public resources, like + // public Google Cloud Storage buckets. + DisableAuthentication bool + // Headers are extra HTTP headers that will be appended to every outgoing + // request. + Headers http.Header + // BaseRoundTripper overrides the base transport used for serving requests. + // If specified ClientCertProvider is ignored. + BaseRoundTripper http.RoundTripper + // Endpoint overrides the default endpoint to be used for a service. + Endpoint string + // APIKey specifies an API key to be used as the basis for authentication. + // If set DetectOpts are ignored. + APIKey string + // Credentials used to add Authorization header to all requests. If set + // DetectOpts are ignored. + Credentials *auth.Credentials + // ClientCertProvider is a function that returns a TLS client certificate to + // be used when opening TLS connections. It follows the same semantics as + // crypto/tls.Config.GetClientCertificate. + ClientCertProvider ClientCertProvider + // DetectOpts configures settings for detect Application Default + // Credentials. + DetectOpts *detect.DetectOptions + // UniverseDomain is the default service domain for a given Cloud universe. + // The default value is "googleapis.com". 
This is the universe domain + // configured for the client, which will be compared to the universe domain + // that is separately configured for the credentials. + UniverseDomain string + + // InternalOptions are NOT meant to be set directly by consumers of this + // package, they should only be set by generated client code. + InternalOptions *InternalOptions +} + +func (o *Options) validate() error { + if o == nil { + return errors.New("httptransport: opts required to be non-nil") + } + if o.InternalOptions != nil && o.InternalOptions.SkipValidation { + return nil + } + hasCreds := o.APIKey != "" || + o.Credentials != nil || + (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) || + (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "") + if o.DisableAuthentication && hasCreds { + return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials") + } + return nil +} + +// client returns the client a user set for the detect options or nil if one was +// not set. +func (o *Options) client() *http.Client { + if o.DetectOpts != nil && o.DetectOpts.Client != nil { + return o.DetectOpts.Client + } + return nil +} + +func (o *Options) resolveDetectOptions() *detect.DetectOptions { + io := o.InternalOptions + // soft-clone these so we are not updating a ref the user holds and may reuse + do := transport.CloneDetectOptions(o.DetectOpts) + + // If scoped JWTs are enabled user provided an aud, allow self-signed JWT. + if (io != nil && io.EnableJWTWithScope) || do.Audience != "" { + do.UseSelfSignedJWT = true + } + // Only default scopes if user did not also set an audience. + if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 { + do.Scopes = make([]string, len(io.DefaultScopes)) + copy(do.Scopes, io.DefaultScopes) + } + if len(do.Scopes) == 0 && do.Audience == "" && io != nil { + do.Audience = o.InternalOptions.DefaultAudience + } + if o.ClientCertProvider != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: o.ClientCertProvider, + } + do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) + do.TokenURL = detect.GoogleMTLSTokenURL + } + return do +} + +// InternalOptions are only meant to be set by generated client code. These are +// not meant to be set directly by consumers of this package. Configuration in +// this type is considered EXPERIMENTAL and may be removed at any time in the +// future without warning. +type InternalOptions struct { + // EnableJWTWithScope specifies if scope can be used with self-signed JWT. + EnableJWTWithScope bool + // DefaultAudience specifies a default audience to be used as the audience + // field ("aud") for the JWT token authentication. + DefaultAudience string + // DefaultEndpointTemplate combined with UniverseDomain specifies the + // default endpoint. + DefaultEndpointTemplate string + // DefaultMTLSEndpoint specifies the default mTLS endpoint. + DefaultMTLSEndpoint string + // DefaultScopes specifies the default OAuth2 scopes to be used for a + // service. + DefaultScopes []string + // SkipValidation bypasses validation on Options. It should only be used + // internally for clients that needs more control over their transport. + SkipValidation bool +} + +// AddAuthorizationMiddleware adds a middleware to the provided client's +// transport that sets the Authorization header with the value produced by the +// provided [cloud.google.com/go/auth.Credentials]. An error is returned only +// if client or creds is nil. 
+func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { + if client == nil || creds == nil { + return fmt.Errorf("httptransport: client and tp must not be nil") + } + base := client.Transport + if base == nil { + if dt, ok := http.DefaultTransport.(*http.Transport); ok { + base = dt.Clone() + } else { + // Directly reuse the DefaultTransport if the application has + // replaced it with an implementation of RoundTripper other than + // http.Transport. + base = http.DefaultTransport + } + } + client.Transport = &authTransport{ + creds: creds, + base: base, + // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. + } + return nil +} + +// NewClient returns a [net/http.Client] that can be used to communicate with a +// Google cloud service, configured with the provided [Options]. It +// automatically appends Authorization headers to all outgoing requests. +func NewClient(opts *Options) (*http.Client, error) { + if err := opts.validate(); err != nil { + return nil, err + } + + tOpts := &transport.Options{ + Endpoint: opts.Endpoint, + ClientCertProvider: opts.ClientCertProvider, + Client: opts.client(), + UniverseDomain: opts.UniverseDomain, + } + if io := opts.InternalOptions; io != nil { + tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate + tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint + } + clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts) + if err != nil { + return nil, err + } + baseRoundTripper := opts.BaseRoundTripper + if baseRoundTripper == nil { + baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext) + } + // Ensure the token exchange transport uses the same ClientCertProvider as the API transport. + opts.ClientCertProvider = clientCertProvider + trans, err := newTransport(baseRoundTripper, opts) + if err != nil { + return nil, err + } + return &http.Client{ + Transport: trans, + }, nil +} + +// SetAuthHeader uses the provided token to set the Authorization header on a +// request. If the token.Type is empty, the type is assumed to be Bearer. +func SetAuthHeader(token *auth.Token, req *http.Request) { + typ := token.Type + if typ == "" { + typ = internal.TokenTypeBearer + } + req.Header.Set("Authorization", typ+" "+token.Value) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go new file mode 100644 index 000000000..467c477c0 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/trace.go @@ -0,0 +1,93 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package httptransport + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + cloudTraceHeader = `X-Cloud-Trace-Context` +) + +// asserts the httpFormat fulfills this foreign interface +var _ propagation.HTTPFormat = (*httpFormat)(nil) + +// httpFormat implements propagation.httpFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Cloud Trace. +type httpFormat struct{} + +// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. +func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(cloudTraceHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 32) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Cloud Trace header. +func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(cloudTraceHeader, header) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go new file mode 100644 index 000000000..93c0b1369 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -0,0 +1,231 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package httptransport + +import ( + "context" + "crypto/tls" + "net" + "net/http" + "os" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport" + "cloud.google.com/go/auth/internal/transport/cert" + "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" +) + +const ( + quotaProjectHeaderKey = "X-goog-user-project" +) + +func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { + var headers = opts.Headers + ht := &headerTransport{ + base: base, + headers: headers, + } + var trans http.RoundTripper = ht + trans = addOCTransport(trans, opts) + switch { + case opts.DisableAuthentication: + // Do nothing. + case opts.APIKey != "": + qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey)) + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + headers.Set(quotaProjectHeaderKey, qp) + } + trans = &apiKeyTransport{ + Transport: trans, + Key: opts.APIKey, + } + default: + var creds *auth.Credentials + if opts.Credentials != nil { + creds = opts.Credentials + } else { + var err error + creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + if err != nil { + return nil, err + } + } + qp, err := creds.QuotaProjectID(context.Background()) + if err != nil { + return nil, err + } + if qp != "" { + if headers == nil { + headers = make(map[string][]string, 1) + } + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } + } + creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) + trans = &authTransport{ + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + } + } + return trans, nil +} + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. +func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig wil be ignored + trans.DialTLSContext = dialTLSContext + } + + // Configures the ReadIdleTimeout HTTP/2 option for the + // transport. This allows broken idle connections to be pruned more quickly, + // preventing the client from attempting to re-use connections that will no + // longer work. + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } + + return trans +} + +type apiKeyTransport struct { + // Key is the API Key to set on requests. + Key string + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return t.Transport.RoundTrip(&newReq) +} + +type headerTransport struct { + headers http.Header + base http.RoundTripper +} + +func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + + for k, v := range t.headers { + newReq.Header[k] = v + } + + return rt.RoundTrip(&newReq) +} + +func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return &ochttp.Transport{ + Base: trans, + Propagation: &httpFormat{}, + } +} + +type authTransport struct { + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string +} + +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. +func (t *authTransport) getClientUniverseDomain() string { + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD + } + return internal.DefaultUniverseDomain +} + +// RoundTrip authorizes and authenticates the request with an +// access token from Transport's Source. Per the RoundTripper contract we must +// not modify the initial request, so we clone it, and we must close the body +// on any errors that happens during our token logic. +func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + token, err := t.creds.Token(req.Context()) + if err != nil { + return nil, err + } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } + req2 := req.Clone(req.Context()) + SetAuthHeader(token, req2) + reqBodyClosed = true + return t.base.RoundTrip(req2) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 000000000..651bd61fb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. +func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 000000000..af490bf4f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 000000000..d92178df8 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package compute
+
+import "os"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+	return os.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 000000000..16be9df30
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+	"errors"
+	"os/exec"
+	"regexp"
+	"strings"
+)
+
+const (
+	windowsCheckCommand = "powershell.exe"
+	windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+	powershellOutputFilter = "Manufacturer"
+	windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+	cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+	out, err := cmd.Output()
+	if err != nil {
+		return nil, err
+	}
+	for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+		if strings.HasPrefix(line, powershellOutputFilter) {
+			re := regexp.MustCompile(windowsManufacturerRegex)
+			name := re.FindString(line)
+			name = strings.TrimLeft(name, ":")
+			return []byte(name), nil
+		}
+	}
+	return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
new file mode 100644
index 000000000..9cd4bed61
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credsfile is meant to hide implementation details from the public
+// surface of the detect package. It should not import any other packages in
+// this module. It is located under the main internal package so other
+// sub-packages can use these parsed types as well.
+package credsfile
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+)
+
+const (
+	// GoogleAppCredsEnvVar is the environment variable for setting the
+	// application default credentials.
+	GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	userCredsFilename = "application_default_credentials.json"
+)
+
+// CredentialType represents different credential filetypes Google credentials
+// can be.
+type CredentialType int + +const ( + // UnknownCredType is an unidentified file type. + UnknownCredType CredentialType = iota + // UserCredentialsKey represents a user creds file type. + UserCredentialsKey + // ServiceAccountKey represents a service account file type. + ServiceAccountKey + // ImpersonatedServiceAccountKey represents a impersonated service account + // file type. + ImpersonatedServiceAccountKey + // ExternalAccountKey represents a external account file type. + ExternalAccountKey + // GDCHServiceAccountKey represents a GDCH file type. + GDCHServiceAccountKey + // ExternalAccountAuthorizedUserKey represents a external account authorized + // user file type. + ExternalAccountAuthorizedUserKey +) + +// parseCredentialType returns the associated filetype based on the parsed +// typeString provided. +func parseCredentialType(typeString string) CredentialType { + switch typeString { + case "service_account": + return ServiceAccountKey + case "authorized_user": + return UserCredentialsKey + case "impersonated_service_account": + return ImpersonatedServiceAccountKey + case "external_account": + return ExternalAccountKey + case "external_account_authorized_user": + return ExternalAccountAuthorizedUserKey + case "gdch_service_account": + return GDCHServiceAccountKey + default: + return UnknownCredType + } +} + +// GetFileNameFromEnv returns the override if provided or detects a filename +// from the environment. +func GetFileNameFromEnv(override string) string { + if override != "" { + return override + } + return os.Getenv(GoogleAppCredsEnvVar) +} + +// GetWellKnownFileName tries to locate the filepath for the user credential +// file based on the environment. +func GetWellKnownFileName() string { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename) +} + +// guessUnixHomeDir default to checking for HOME, but not all unix systems have +// this set, do have a fallback. +func guessUnixHomeDir() string { + if v := os.Getenv("HOME"); v != "" { + return v + } + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go new file mode 100644 index 000000000..3be6e5bbb --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -0,0 +1,157 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// Config3LO is the internals of a client creds file. +type Config3LO struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` +} + +// ClientCredentialsFile representation. 
+type ClientCredentialsFile struct { + Web *Config3LO `json:"web"` + Installed *Config3LO `json:"installed"` + UniverseDomain string `json:"universe_domain"` +} + +// ServiceAccountFile representation. +type ServiceAccountFile struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientID string `json:"client_id"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} + +// UserCredentialsFile representation. +type UserCredentialsFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + QuotaProjectID string `json:"quota_project_id"` + RefreshToken string `json:"refresh_token"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountFile representation. +type ExternalAccountFile struct { + Type string `json:"type"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + TokenURL string `json:"token_url"` + CredentialSource *CredentialSource `json:"credential_source,omitempty"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + UniverseDomain string `json:"universe_domain"` +} + +// ExternalAccountAuthorizedUserFile representation. +type ExternalAccountAuthorizedUserFile struct { + Type string `json:"type"` + Audience string `json:"audience"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + TokenURL string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + RevokeURL string `json:"revoke_url"` + QuotaProjectID string `json:"quota_project_id"` + UniverseDomain string `json:"universe_domain"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. +// +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. +type CredentialSource struct { + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` +} + +// Format describes the format of a [CredentialSource]. +type Format struct { + // Type is either "text" or "json". When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure. 
+ SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// ExecutableConfig represents the command to run for an executable +// [CredentialSource]. +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + +// ServiceAccountImpersonationInfo has impersonation configuration. +type ServiceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +// ImpersonatedServiceAccountFile representation. +type ImpersonatedServiceAccountFile struct { + Type string `json:"type"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + Delegates []string `json:"delegates"` + CredSource json.RawMessage `json:"source_credentials"` + UniverseDomain string `json:"universe_domain"` +} + +// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file. +type GDCHServiceAccountFile struct { + Type string `json:"type"` + FormatVersion string `json:"format_version"` + Project string `json:"project"` + Name string `json:"name"` + CertPath string `json:"ca_cert_path"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + UniverseDomain string `json:"universe_domain"` +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go new file mode 100644 index 000000000..a02b9f5df --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go @@ -0,0 +1,98 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package credsfile + +import ( + "encoding/json" +) + +// ParseServiceAccount parses bytes into a [ServiceAccountFile]. +func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) { + var f *ServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseClientCredentials parses bytes into a +// [credsfile.ClientCredentialsFile]. +func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) { + var f *ClientCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseUserCredentials parses bytes into a [UserCredentialsFile]. +func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) { + var f *UserCredentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccount parses bytes into a [ExternalAccountFile]. 
+func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) { + var f *ExternalAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseExternalAccountAuthorizedUser parses bytes into a +// [ExternalAccountAuthorizedUserFile]. +func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) { + var f *ExternalAccountAuthorizedUserFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseImpersonatedServiceAccount parses bytes into a +// [ImpersonatedServiceAccountFile]. +func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) { + var f *ImpersonatedServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile]. +func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) { + var f *GDCHServiceAccountFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + return f, nil +} + +type fileTypeChecker struct { + Type string `json:"type"` +} + +// ParseFileType determines the [CredentialType] based on bytes provided. +func ParseFileType(b []byte) (CredentialType, error) { + var f fileTypeChecker + if err := json.Unmarshal(b, &f); err != nil { + return 0, err + } + return parseCredentialType(f.Type), nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go new file mode 100644 index 000000000..66a51f19c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -0,0 +1,216 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const ( + // TokenTypeBearer is the auth header prefix for bearer tokens. + TokenTypeBearer = "Bearer" + + // QuotaProjectEnvVar is the environment variable for setting the quota + // project. + QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 + + // DefaultUniverseDomain is the default value for universe domain. + // Universe domain is the default service domain for a given Cloud universe. + DefaultUniverseDomain = "googleapis.com" +) + +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. 
+func DefaultClient() *http.Client {
+	if transport, ok := http.DefaultTransport.(clonableTransport); ok {
+		return &http.Client{
+			Transport: transport.Clone(),
+			Timeout: 30 * time.Second,
+		}
+	}
+
+	return &http.Client{
+		Transport: http.DefaultTransport,
+		Timeout: 30 * time.Second,
+	}
+}
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+// GetQuotaProject retrieves quota project with precedence being: override,
+// environment variable, creds json file.
+func GetQuotaProject(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(QuotaProjectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		QuotaProject string `json:"quota_project_id"`
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	return v.QuotaProject
+}
+
+// GetProjectID retrieves project with precedence being: override,
+// environment variable, creds json file.
+func GetProjectID(b []byte, override string) string {
+	if override != "" {
+		return override
+	}
+	if env := os.Getenv(projectEnvVar); env != "" {
+		return env
+	}
+	if b == nil {
+		return ""
+	}
+	var v struct {
+		ProjectID string `json:"project_id"` // standard service account key
+		Project string `json:"project"` // gdch key
+	}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return ""
+	}
+	if v.ProjectID != "" {
+		return v.ProjectID
+	}
+	return v.Project
+}
+
+// DoRequest executes the provided req with the client. It reads the response
+// body, closes it, and returns it.
+func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+	body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize))
+	if err != nil {
+		return nil, nil, err
+	}
+	return resp, body, nil
+}
+
+// ReadAll consumes the whole reader and safely reads the content of its body
+// with some overflow protection.
+func ReadAll(r io.Reader) ([]byte, error) {
+	return io.ReadAll(io.LimitReader(r, maxBodySize))
+}
+
+// StaticCredentialsProperty is a helper for creating static credentials
+// properties.
+func StaticCredentialsProperty(s string) StaticProperty {
+	return StaticProperty(s)
+}
+
+// StaticProperty always returns the value of the underlying string.
+type StaticProperty string
+
+// GetProperty loads the property value for the given context.
+func (p StaticProperty) GetProperty(context.Context) (string, error) {
+	return string(p), nil
+}
+
+// ComputeUniverseDomainProvider fetches the credentials universe domain from
+// the google cloud metadata service.
+type ComputeUniverseDomainProvider struct { + universeDomainOnce sync.Once + universeDomain string + universeDomainErr error +} + +// GetProperty fetches the credentials universe domain from the google cloud +// metadata service. +func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { + c.universeDomainOnce.Do(func() { + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + }) + if c.universeDomainErr != nil { + return "", c.universeDomainErr + } + return c.universeDomain, nil +} + +// httpGetMetadataUniverseDomain is a package var for unit test substitution. +var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return metadata.GetWithContext(ctx, "universe/universe_domain") +} + +func getMetadataUniverseDomain(ctx context.Context) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx) + if err == nil { + return universeDomain, nil + } + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return DefaultUniverseDomain, nil + } + return "", err +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go new file mode 100644 index 000000000..dc28b3c3b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -0,0 +1,171 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jwt + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +const ( + // HeaderAlgRSA256 is the RS256 [Header.Algorithm]. + HeaderAlgRSA256 = "RS256" + // HeaderAlgES256 is the ES256 [Header.Algorithm]. + HeaderAlgES256 = "ES256" + // HeaderType is the standard [Header.Type]. + HeaderType = "JWT" +) + +// Header represents a JWT header. +type Header struct { + Algorithm string `json:"alg"` + Type string `json:"typ"` + KeyID string `json:"kid"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Claims represents the claims set of a JWT. +type Claims struct { + // Iss is the issuer JWT claim. + Iss string `json:"iss"` + // Scope is the scope JWT claim. + Scope string `json:"scope,omitempty"` + // Exp is the expiry JWT claim. If unset, default is in one hour from now. + Exp int64 `json:"exp"` + // Iat is the subject issued at claim. If unset, default is now. + Iat int64 `json:"iat"` + // Aud is the audience JWT claim. Optional. + Aud string `json:"aud"` + // Sub is the subject JWT claim. Optional. + Sub string `json:"sub,omitempty"` + // AdditionalClaims contains any additional non-standard JWT claims. Optional. 
+ AdditionalClaims map[string]interface{} `json:"-"` +} + +func (c *Claims) encode() (string, error) { + // Compensate for skew + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.AdditionalClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.AdditionalClaims) + if err != nil { + return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// EncodeJWS encodes the data using the provided key as a JSON web signature. +func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + claims, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, claims) + h := sha256.New() + h.Write([]byte(ss)) + sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// DecodeJWS decodes a claim set from a JWS payload. +func DecodeJWS(payload string) (*Claims, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + return nil, errors.New("invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &Claims{} + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil { + return nil, err + } + if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil { + return nil, err + } + return c, err +} + +// VerifyJWS tests whether the provided JWT token's signature was produced by +// the private key associated with the provided public key. +func VerifyJWS(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jwt: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go new file mode 100644 index 000000000..26e037c1a --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -0,0 +1,356 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
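The jwt helpers above sign and verify RS256 JWS payloads over `base64url(header) + "." + base64url(claims)`. A minimal, self-contained sketch of the same scheme using only the standard library (the key is generated on the fly purely for illustration, and nothing here imports the vendored internal package):

```
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Throwaway key for illustration only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Same shape as the Header and Claims types above.
	header, _ := json.Marshal(map[string]string{"alg": "RS256", "typ": "JWT"})
	now := time.Now()
	claims, _ := json.Marshal(map[string]interface{}{
		"iss": "example@developer.gserviceaccount.com", // placeholder issuer
		"aud": "https://oauth2.googleapis.com/token",
		"iat": now.Unix(),
		"exp": now.Add(time.Hour).Unix(),
	})

	// Signing input is base64url(header) + "." + base64url(claims).
	ss := base64.RawURLEncoding.EncodeToString(header) + "." +
		base64.RawURLEncoding.EncodeToString(claims)
	digest := sha256.Sum256([]byte(ss))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}
	token := ss + "." + base64.RawURLEncoding.EncodeToString(sig)

	// Verification mirrors VerifyJWS: hash the signed content, check the signature.
	if err := rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, digest[:], sig); err != nil {
		panic(err)
	}
	fmt.Println(len(token), "byte JWS produced and verified")
}
```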
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "cloud.google.com/go/auth/internal" + "cloud.google.com/go/auth/internal/transport/cert" + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE" + googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT" + googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +) + +// Options is a struct that is duplicated information from the individual +// transport packages in order to avoid cyclic deps. It correlates 1:1 with +// fields on httptransport.Options and grpctransport.Options. +type Options struct { + Endpoint string + DefaultMTLSEndpoint string + DefaultEndpointTemplate string + ClientCertProvider cert.Provider + Client *http.Client + UniverseDomain string + EnableDirectPath bool + EnableDirectPathXds bool +} + +// getUniverseDomain returns the default service domain for a given Cloud +// universe. +func (o *Options) getUniverseDomain() string { + if o.UniverseDomain == "" { + return internal.DefaultUniverseDomain + } + return o.UniverseDomain +} + +// isUniverseDomainGDU returns true if the universe domain is the default Google +// universe. +func (o *Options) isUniverseDomainGDU() bool { + return o.getUniverseDomain() == internal.DefaultUniverseDomain +} + +// defaultEndpoint returns the DefaultEndpointTemplate merged with the +// universe domain if the DefaultEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultEndpoint() string { + if o.DefaultEndpointTemplate == "" { + return "" + } + return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + +// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the +// default endpoint. +func (o *Options) mergedEndpoint() (string, error) { + defaultEndpoint := o.defaultEndpoint() + u, err := url.Parse(fixScheme(defaultEndpoint)) + if err != nil { + return "", err + } + return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportCredsAndEndpoint returns an instance of +// [google.golang.org/grpc/credentials.TransportCredentials], and the +// corresponding endpoint to use for GRPC client. 
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return defaultTransportCreds, config.endpoint, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfig returns a client certificate source and a function for +// dialing MTLS with S2A. +func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) { + config, err := getTransportConfig(opts) + if err != nil { + return nil, nil, err + } + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return config.clientCertSource, nil, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, nil +} + +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. 
For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + +func getTransportConfig(opts *Options) (*transportConfig, error) { + clientCertSource, err := GetClientCertificateProvider(opts) + if err != nil { + return nil, err + } + endpoint, err := getEndpoint(opts, clientCertSource) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + } + + if !shouldUseS2A(clientCertSource, opts) { + return &defaultTransportConfig, nil + } + if !opts.isUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aAddress := GetS2AAddress() + mtlsS2AAddress := GetMTLSS2AAddress() + if s2aAddress == "" && mtlsS2AAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, + }, nil +} + +// GetClientCertificateProvider returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +func GetClientCertificateProvider(opts *Options) (cert.Provider, error) { + if !isClientCertificateEnabled(opts) { + return nil, nil + } else if opts.ClientCertProvider != nil { + return opts.ClientCertProvider, nil + } + return cert.DefaultProvider() + +} + +// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var +func isClientCertificateEnabled(opts *Options) bool { + if value, ok := os.LookupEnv(googleAPIUseCertSource); ok { + // error as false is OK + b, _ := strconv.ParseBool(value) + return b + } + return opts.isUniverseDomainGDU() +} + +type transportConfig struct { + // The client certificate source. + clientCertSource cert.Provider + // The corresponding endpoint to use based on client certificate source. + endpoint string + // The plaintext S2A address if it can be used, otherwise an empty string. + s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string + // The MTLS endpoint to use with S2A. + s2aMTLSEndpoint string +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. 
For example, WithEndpoint("myhost:8000") and +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { + if opts.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if !opts.isUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } + return opts.DefaultMTLSEndpoint, nil + } + return opts.defaultEndpoint(), nil + } + if strings.Contains(opts.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return opts.Endpoint, nil + } + if opts.defaultEndpoint() == "" { + // If DefaultEndpointTemplate is not configured, + // use the user provided endpoint verbatim. This allows a naked + // "host[:port]" URL to be used with GRPC Direct Path. + return opts.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return opts.mergedEndpoint() +} + +func getMTLSMode() string { + mode := os.Getenv(googleAPIUseMTLS) + if mode == "" { + mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go new file mode 100644 index 000000000..5cedc50f1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go @@ -0,0 +1,65 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate provider created by [DefaultProvider]. +// +// A singleton model is used to allow the provider to be reused +// by the transport layer. As mentioned in [DefaultProvider] (provider nil, nil) +// may be returned to indicate a default provider could not be found, which +// will skip extra tls config in the transport layer . +type defaultCertData struct { + once sync.Once + provider Provider + err error +} + +var ( + defaultCert defaultCertData +) + +// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. 
+func DefaultProvider() (Provider, error) { + defaultCert.once.Do(func() { + defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = NewSecureConnectProvider("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.provider, defaultCert.err = nil, nil + } + } + } + }) + return defaultCert.provider, defaultCert.err +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go new file mode 100644 index 000000000..366515916 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxyProvider creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go new file mode 100644 index 000000000..738cb2161 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -0,0 +1,124 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectProvider creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. 
+func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go new file mode 100644 index 000000000..e8675bf82 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -0,0 +1,117 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "io" + "os" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +type certConfigs struct { + Workload *workloadSource `json:"workload"` +} + +type workloadSource struct { + CertPath string `json:"cert_path"` + KeyPath string `json:"key_path"` +} + +type certificateConfig struct { + CertConfigs certConfigs `json:"cert_configs"` +} + +// NewWorkloadX509CertProvider creates a certificate source +// that reads a certificate and private key file from the local file system. +// This is intended to be used for workload identity federation. +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate and key file paths. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { + if configFilePath == "" { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } + } + + certFile, keyFile, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return nil, err + } + + source := &workloadSource{ + CertPath: certFile, + KeyPath: keyFile, + } + return source.getClientCertificate, nil +} + +// getClientCertificate attempts to load the certificate and key from the files specified in the +// certificate config. +func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath) + if err != nil { + return nil, err + } + return &cert, nil +} + +// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private +// key file paths. 
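The workload X.509 provider above reads a small JSON config naming a certificate and key file, then loads them with `tls.LoadX509KeyPair`. A minimal sketch of that flow, assuming a local `certificate_config.json` with the same `cert_configs.workload` layout (file name and paths are placeholders, not the provider's real lookup logic):

```
package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"os"
)

// Mirrors the cert_configs.workload layout parsed by the vendored provider.
type certificateConfig struct {
	CertConfigs struct {
		Workload *struct {
			CertPath string `json:"cert_path"`
			KeyPath  string `json:"key_path"`
		} `json:"workload"`
	} `json:"cert_configs"`
}

func main() {
	// Placeholder path; the real provider falls back to a well-known gcloud
	// location when no path is given.
	raw, err := os.ReadFile("certificate_config.json")
	if err != nil {
		panic(err)
	}
	var cfg certificateConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	if cfg.CertConfigs.Workload == nil {
		panic("config has no workload section")
	}
	// Same loading step the provider performs when asked for a client certificate.
	cert, err := tls.LoadX509KeyPair(cfg.CertConfigs.Workload.CertPath, cfg.CertConfigs.Workload.KeyPath)
	if err != nil {
		panic(err)
	}
	fmt.Println("loaded client certificate with", len(cert.Certificate), "chain element(s)")
}
```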
+func getCertAndKeyFiles(configFilePath string) (string, string, error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", "", errSourceUnavailable + } + return "", "", err + } + + byteValue, err := io.ReadAll(jsonFile) + if err != nil { + return "", "", err + } + + var config certificateConfig + if err := json.Unmarshal(byteValue, &config); err != nil { + return "", "", err + } + + if config.CertConfigs.Workload == nil { + return "", "", errSourceUnavailable + } + + certFile := config.CertConfigs.Workload.CertPath + keyFile := config.CertConfigs.Workload.KeyPath + + if certFile == "" { + return "", "", errors.New("certificate configuration is missing the certificate file location") + } + + if keyFile == "" { + return "", "", errors.New("certificate configuration is missing the key file location") + } + + return certFile, keyFile, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go new file mode 100644 index 000000000..37894bfcd --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -0,0 +1,134 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "strconv" + "sync" + + "cloud.google.com/go/auth/internal/transport/cert" + "cloud.google.com/go/compute/metadata" +) + +const ( + configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" +) + +var ( + mtlsConfiguration *mtlsConfig + + mtlsOnce sync.Once +) + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +// Returns empty string if not set or invalid. +func GetS2AAddress() string { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.PlaintextAddress +} + +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress() string { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` +} + +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. 
+type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +func getMetadataMTLSAutoConfig() { + var err error + mtlsOnce.Do(func() { + mtlsConfiguration, err = queryConfig() + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) + } + }) +} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.GetWithContext(context.Background(), configEndpointSuffix) +} + +func queryConfig() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) + } + if config.S2A == nil { + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) + } + return &config, nil +} + +func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. + if opts.Client != nil { + return false + } + // If directPath is enabled, skip S2A. + return !opts.EnableDirectPath && !opts.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv)) + if err != nil { + return false + } + return b +} diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go new file mode 100644 index 000000000..cc586ec5b --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -0,0 +1,105 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provided internal helpers for the two transport packages +// (grpctransport and httptransport). +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "cloud.google.com/go/auth/credentials" +) + +// CloneDetectOptions clones a user set detect option into some new memory that +// we can internally manipulate before sending onto the detect package. +func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions { + if oldDo == nil { + // it is valid for users not to set this, but we will need to to default + // some options for them in this case so return some initialized memory + // to work with. 
+ return &credentials.DetectOptions{} + } + newDo := &credentials.DetectOptions{ + // Simple types + Audience: oldDo.Audience, + Subject: oldDo.Subject, + EarlyTokenRefresh: oldDo.EarlyTokenRefresh, + TokenURL: oldDo.TokenURL, + STSAudience: oldDo.STSAudience, + CredentialsFile: oldDo.CredentialsFile, + UseSelfSignedJWT: oldDo.UseSelfSignedJWT, + UniverseDomain: oldDo.UniverseDomain, + + // These fields are are pointer types that we just want to use exactly + // as the user set, copy the ref + Client: oldDo.Client, + AuthHandlerOptions: oldDo.AuthHandlerOptions, + } + + // Smartly size this memory and copy below. + if len(oldDo.CredentialsJSON) > 0 { + newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON)) + copy(newDo.CredentialsJSON, oldDo.CredentialsJSON) + } + if len(oldDo.Scopes) > 0 { + newDo.Scopes = make([]string, len(oldDo.Scopes)) + copy(newDo.Scopes, oldDo.Scopes) + } + + return newDo +} + +// ValidateUniverseDomain verifies that the universe domain configured for the +// client matches the universe domain configured for the credentials. +func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error { + if clientUniverseDomain != credentialsUniverseDomain { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "the universe domain explicitly, \"googleapis.com\" is the default", + clientUniverseDomain, + credentialsUniverseDomain) + } + return nil +} + +// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { + trans := BaseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. 
+func BaseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md new file mode 100644 index 000000000..7faf6e0c9 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -0,0 +1,54 @@ +# Changelog + +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18) + + +### Bug Fixes + +* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16) + + +### Features + +* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 0.1.0 (2023-10-19) + + +### Features + +* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f)) +* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/LICENSE.txt rename 
to vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go new file mode 100644 index 000000000..9835ac571 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -0,0 +1,164 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth] +// and [golang.org/x/oauth2]. +package oauth2adapt + +import ( + "context" + "encoding/json" + "errors" + + "cloud.google.com/go/auth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] +// into a [cloud.google.com/go/auth.TokenProvider]. +func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { + return &tokenProviderAdapter{ts: ts} +} + +type tokenProviderAdapter struct { + ts oauth2.TokenSource +} + +// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It +// is a light wrapper around the underlying TokenSource. +func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { + tok, err := tp.ts.Token() + if err != nil { + var err2 *oauth2.RetrieveError + if ok := errors.As(err, &err2); ok { + return nil, AuthErrorFromRetrieveError(err2) + } + return nil, err + } + return &auth.Token{ + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + }, nil +} + +// TokenSourceFromTokenProvider converts any +// [cloud.google.com/go/auth.TokenProvider] into a +// [golang.org/x/oauth2.TokenSource]. +func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource { + return &tokenSourceAdapter{tp: tp} +} + +type tokenSourceAdapter struct { + tp auth.TokenProvider +} + +// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It +// is a light wrapper around the underlying TokenProvider. +func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { + tok, err := ts.tp.Token(context.Background()) + if err != nil { + var err2 *auth.Error + if ok := errors.As(err, &err2); ok { + return nil, AddRetrieveErrorToAuthError(err2) + } + return nil, err + } + return &oauth2.Token{ + AccessToken: tok.Value, + TokenType: tok.Type, + Expiry: tok.Expiry, + }, nil +} + +// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] +// to a [cloud.google.com/go/auth.Credentials]. 
+func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials { + if creds == nil { + return nil + } + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: TokenProviderFromTokenSource(creds.TokenSource), + JSON: creds.JSON, + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.ProjectID, nil + }), + UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return creds.GetUniverseDomain() + }), + }) +} + +// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials] +// to a [golang.org/x/oauth2/google.Credentials]. +func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials { + if creds == nil { + return nil + } + // Throw away errors as old credentials are not request aware. Also, no + // network requests are currently happening for this use case. + projectID, _ := creds.ProjectID(context.Background()) + + return &google.Credentials{ + TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider), + ProjectID: projectID, + JSON: creds.JSON(), + UniverseDomainProvider: func() (string, error) { + return creds.UniverseDomain(context.Background()) + }, + } +} + +type oauth2Error struct { + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +// AddRetrieveErrorToAuthError returns the same error provided and adds a +// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the +// [cloud.google.com/go/auth.Error]. +func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error { + if err == nil { + return nil + } + e := &oauth2.RetrieveError{ + Response: err.Response, + Body: err.Body, + } + err.Err = e + if len(err.Body) > 0 { + var oErr oauth2Error + // ignore the error as it only fills in extra details + json.Unmarshal(err.Body, &oErr) + e.ErrorCode = oErr.ErrorCode + e.ErrorDescription = oErr.ErrorDescription + e.ErrorURI = oErr.ErrorURI + } + return err +} + +// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that +// wraps the provided [golang.org/x/oauth2.RetrieveError]. +func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error { + if err == nil { + return nil + } + return &auth.Error{ + Response: err.Response, + Body: err.Body, + Err: err, + } +} diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go new file mode 100644 index 000000000..97a57f469 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -0,0 +1,368 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
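The oauth2adapt package above bridges the legacy `golang.org/x/oauth2` types and the new `cloud.google.com/go/auth` types. A minimal sketch of converting in both directions, assuming a placeholder static token rather than a real OAuth flow:

```
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Placeholder token; in practice this would come from a real oauth2 flow.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake-token", TokenType: "Bearer"})

	// Wrap the legacy TokenSource so it satisfies the new auth.TokenProvider interface.
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)
	tok, err := tp.Token(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("token type:", tok.Type)

	// And back again, for code that still expects an oauth2.TokenSource.
	back := oauth2adapt.TokenSourceFromTokenProvider(tp)
	if _, err := back.Token(); err != nil {
		panic(err)
	}
}
```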
+ +package auth + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "cloud.google.com/go/auth/internal" +) + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for +// OAuth consent at the specified auth code URL and returns an auth code and +// state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// Options3LO are the options for doing a 3-legged OAuth2 flow. +type Options3LO struct { + // ClientID is the application's ID. + ClientID string + // ClientSecret is the application's secret. Not required if AuthHandlerOpts + // is set. + ClientSecret string + // AuthURL is the URL for authenticating. + AuthURL string + // TokenURL is the URL for retrieving a token. + TokenURL string + // AuthStyle is used to describe how to client info in the token request. + AuthStyle Style + // RefreshToken is the token used to refresh the credential. Not required + // if AuthHandlerOpts is set. + RefreshToken string + // RedirectURL is the URL to redirect users to. Optional. + RedirectURL string + // Scopes specifies requested permissions for the Token. Optional. + Scopes []string + + // URLParams are the set of values to apply to the token exchange. Optional. + URLParams url.Values + // Client is the client to be used to make the underlying token requests. + // Optional. + Client *http.Client + // EarlyTokenExpiry is the time before the token expires that it should be + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. + EarlyTokenExpiry time.Duration + + // AuthHandlerOpts provides a set of options for doing a + // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. + AuthHandlerOpts *AuthorizationHandlerOptions +} + +func (o *Options3LO) validate() error { + if o == nil { + return errors.New("auth: options must be provided") + } + if o.ClientID == "" { + return errors.New("auth: client ID must be provided") + } + if o.AuthHandlerOpts == nil && o.ClientSecret == "" { + return errors.New("auth: client secret must be provided") + } + if o.AuthURL == "" { + return errors.New("auth: auth URL must be provided") + } + if o.TokenURL == "" { + return errors.New("auth: token URL must be provided") + } + if o.AuthStyle == StyleUnknown { + return errors.New("auth: auth style must be provided") + } + if o.AuthHandlerOpts == nil && o.RefreshToken == "" { + return errors.New("auth: refresh token must be provided") + } + return nil +} + +// PKCEOptions holds parameters to support PKCE. +type PKCEOptions struct { + // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. + Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier. + // ChallengeMethod is the encryption method (ex. S256). + ChallengeMethod string + // Verifier is the original, non-encrypted secret. + Verifier string // The original, non-encrypted secret. 
+} + +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + // error fields + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +func (o *Options3LO) client() *http.Client { + if o.Client != nil { + return o.Client + } + return internal.DefaultClient() +} + +// authCodeURL returns a URL that points to a OAuth2 consent page. +func (o *Options3LO) authCodeURL(state string, values url.Values) string { + var buf bytes.Buffer + buf.WriteString(o.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {o.ClientID}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if len(o.Scopes) > 0 { + v.Set("scope", strings.Join(o.Scopes, " ")) + } + if state != "" { + v.Set("state", state) + } + if o.AuthHandlerOpts != nil { + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Challenge != "" { + v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge) + } + if o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" { + v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod) + } + } + for k := range values { + v.Set(k, v.Get(k)) + } + if strings.Contains(o.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2 +// configuration. The TokenProvider is caches and auto-refreshes tokens by +// default. +func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) { + if err := opts.validate(); err != nil { + return nil, err + } + if opts.AuthHandlerOpts != nil { + return new3LOTokenProviderWithAuthHandler(opts), nil + } + return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }), nil +} + +// AuthorizationHandlerOptions provides a set of options to specify for doing a +// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. +type AuthorizationHandlerOptions struct { + // AuthorizationHandler specifies the handler used to for the authorization + // part of the flow. + Handler AuthorizationHandler + // State is used verify that the "state" is identical in the request and + // response before exchanging the auth code for OAuth2 token. + State string + // PKCEOpts allows setting configurations for PKCE. Optional. + PKCEOpts *PKCEOptions +} + +func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider { + return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenExpiry, + }) +} + +// exchange handles the final exchange portion of the 3lo flow. Returns a Token, +// refreshToken, and error. 
+func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) { + // Build request + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if o.RedirectURL != "" { + v.Set("redirect_uri", o.RedirectURL) + } + if o.AuthHandlerOpts != nil && + o.AuthHandlerOpts.PKCEOpts != nil && + o.AuthHandlerOpts.PKCEOpts.Verifier != "" { + v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier) + } + for k := range o.URLParams { + v.Set(k, o.URLParams.Get(k)) + } + return fetchToken(ctx, o, v) +} + +// This struct is not safe for concurrent access alone, but the way it is used +// in this package by wrapping it with a cachedTokenProvider makes it so. +type tokenProvider3LO struct { + opts *Options3LO + client *http.Client + refreshToken string +} + +func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) { + if tp.refreshToken == "" { + return nil, errors.New("auth: token expired and refresh token is not set") + } + v := url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tp.refreshToken}, + } + for k := range tp.opts.URLParams { + v.Set(k, tp.opts.URLParams.Get(k)) + } + + tk, rt, err := fetchToken(ctx, tp.opts, v) + if err != nil { + return nil, err + } + if tp.refreshToken != rt && rt != "" { + tp.refreshToken = rt + } + return tk, err +} + +type tokenProviderWithHandler struct { + opts *Options3LO + state string +} + +func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) { + url := tp.opts.authCodeURL(tp.state, nil) + code, state, err := tp.opts.AuthHandlerOpts.Handler(url) + if err != nil { + return nil, err + } + if state != tp.state { + return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow") + } + tok, _, err := tp.opts.exchange(ctx, code) + return tok, err +} + +// fetchToken returns a Token, refresh token, and/or an error. 
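`New3LOTokenProvider` above wires an `Options3LO` value into a cached, auto-refreshing token provider. A minimal usage sketch, assuming placeholder client credentials and refresh token (a real call would reach the configured token endpoint):

```
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/auth"
)

func main() {
	// All values below are placeholders; a real application would use its own
	// OAuth client registration and a refresh token obtained from user consent.
	tp, err := auth.New3LOTokenProvider(&auth.Options3LO{
		ClientID:     "my-client-id.apps.googleusercontent.com",
		ClientSecret: "my-client-secret",
		AuthURL:      "https://accounts.google.com/o/oauth2/auth",
		TokenURL:     "https://oauth2.googleapis.com/token",
		AuthStyle:    auth.StyleInParams,
		RefreshToken: "my-refresh-token",
		Scopes:       []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		panic(err)
	}

	// The returned provider caches and refreshes tokens as needed.
	tok, err := tp.Token(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("access token expires at", tok.Expiry)
}
```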
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) { + var refreshToken string + if o.AuthStyle == StyleInParams { + if o.ClientID != "" { + v.Set("client_id", o.ClientID) + } + if o.ClientSecret != "" { + v.Set("client_secret", o.ClientSecret) + } + } + req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, refreshToken, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if o.AuthStyle == StyleInHeader { + req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) + } + + // Make request + resp, body, err := internal.DoRequest(o.client(), req) + if err != nil { + return nil, refreshToken, err + } + failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 + tokError := &Error{ + Response: resp, + Body: body, + } + + var token *Token + // errors ignored because of default switch on content + content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err) + } + tokError.code = vals.Get("error") + tokError.description = vals.Get("error_description") + tokError.uri = vals.Get("error_uri") + token = &Token{ + Value: vals.Get("access_token"), + Type: vals.Get("token_type"), + Metadata: make(map[string]interface{}, len(vals)), + } + for k, v := range vals { + token.Metadata[k] = v + } + refreshToken = vals.Get("refresh_token") + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, refreshToken, tokError + } + return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err) + } + tokError.code = tj.ErrorCode + tokError.description = tj.ErrorDescription + tokError.uri = tj.ErrorURI + token = &Token{ + Value: tj.AccessToken, + Type: tj.TokenType, + Expiry: tj.expiry(), + Metadata: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Metadata) // optional field, skip err check + refreshToken = tj.RefreshToken + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || tokError.code != "" { + return nil, refreshToken, tokError + } + if token.Value == "" { + return nil, refreshToken, errors.New("auth: server response missing access_token") + } + return token, refreshToken, nil +} diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000..da7db19b1 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,59 @@ +# Changes + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) 
([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) + + +### Features + +* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/LICENSE rename to vendor/cloud.google.com/go/compute/metadata/LICENSE index af39a91e7..d64569567 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright 2016 Microsoft Corporation + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000..f940fb2c8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 125b7033c..c160b4786 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -16,19 +16,18 @@ // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. +// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -61,25 +60,20 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ +var defaultClient = &Client{hc: newDefaultHTTPClient()} + +func newDefaultHTTPClient() *http.Client { + return &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, - }} -) + Timeout: 5 * time.Second, + } +} // NotDefinedError is returned when requested metadata is not defined. 
// @@ -93,16 +87,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.Get(c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -115,7 +109,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. +// NOTE: True returned from `OnGCE` does not guarantee that the metadata server +// is accessible from this process and have all the metadata defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -141,7 +137,7 @@ func testOnGCE() bool { go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) + res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) if err != nil { resc <- false return @@ -151,7 +147,8 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.LookupHost("metadata.google.internal") + resolver := &net.Resolver{} + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return @@ -192,76 +189,213 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" +// Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) +// SubscribeWithContext calls Client.SubscribeWithContext on the default client. +func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(ctx, suffix, fn) +} + +// Get calls Client.GetWithContext on the default client. +// +// Deprecated: Please use the context aware variant [GetWithContext]. +func Get(suffix string) (string, error) { + return defaultClient.GetWithContext(context.Background(), suffix) } -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } +// GetWithContext calls Client.GetWithContext on the default client. 
+func GetWithContext(ctx context.Context, suffix string) (string, error) { + return defaultClient.GetWithContext(ctx, suffix) +} // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. +func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. 
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} + +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} + +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. 
+func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) } -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -277,15 +411,19 @@ type Client struct { hc *http.Client } -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. +// NewClient returns a Client that can be used to fetch metadata. +// Returns the client that uses the specified http.Client for HTTP requests. +// If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { + if c == nil { + return defaultClient + } return &Client{hc: c} } // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { +func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. To enable spoofing of the metadata service, the environment @@ -300,19 +438,42 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // being stable anyway. 
host = metadataIP } + suffix = strings.TrimLeft(suffix, "/") u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := c.hc.Do(req) + req, err := http.NewRequestWithContext(ctx, "GET", u, nil) if err != nil { return "", "", err } + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + var res *http.Response + var reqErr error + retryer := newRetryer() + for { + res, reqErr = c.hc.Do(req) + var code int + if res != nil { + code = res.StatusCode + } + if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } + if err := sleep(ctx, delay); err != nil { + return "", "", err + } + continue + } + break + } + if reqErr != nil { + return "", "", reqErr + } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := ioutil.ReadAll(res.Body) + all, err := io.ReadAll(res.Body) if err != nil { return "", "", err } @@ -330,19 +491,37 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// Deprecated: Please use the context aware variant [Client.GetWithContext]. func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) + return c.GetWithContext(context.Background(), suffix) +} + +// GetWithContext returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take in the +// worst case, with internal backoff retries, up to 15 seconds (e.g. when server +// is responding slowly). Pass context with additional timeouts when needed. +func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { + val, _, err := c.getETag(ctx, suffix) return val, err } -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) +func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) { + s, err = c.GetWithContext(ctx, suffix) s = strings.TrimSpace(s) return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -354,35 +533,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. 
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. +func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") +} + +// Email returns the email address associated with the service account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. +func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter default value (empty string or "default" value) +// will use the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". 
+func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.Get("instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -393,17 +641,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. +// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -414,12 +672,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. 
The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -429,8 +709,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -441,39 +735,71 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. // -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. 
Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. +// Deprecated: Please use the context aware variant [Client.SubscribeWithContext]. func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// SubscribeWithContext calls fn with the latest metadata value indicated by the +// provided suffix. If the metadata value is deleted, fn is called with the +// empty string and ok false. Subscribe blocks until fn returns a non-nil error +// or the value is deleted. Subscribe returns the error value returned from the +// last call to fn, which may be nil when ok == false. +func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { const failedSubscribeSleep = time.Second * 5 // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) + val, lastETag, err := c.getETag(ctx, suffix) if err != nil { return err } - if err := fn(val, true); err != nil { + if err := fn(ctx, val, true); err != nil { return err } @@ -484,7 +810,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro suffix += "?wait_for_change=true&last_etag=" } for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) if err != nil { if _, deleted := err.(NotDefinedError); !deleted { time.Sleep(failedSubscribeSleep) @@ -494,7 +820,7 @@ func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) erro } lastETag = etag - if err := fn(val, ok); err != nil || !ok { + if err := fn(ctx, val, ok); err != nil || !ok { return err } } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go new file mode 100644 index 000000000..3d4bc75dd --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -0,0 +1,114 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "io" + "math/rand" + "net/http" + "time" +) + +const ( + maxRetryAttempts = 5 +) + +var ( + syscallRetryable = func(error) bool { return false } +) + +// defaultBackoff is basically equivalent to gax.Backoff without the need for +// the dependency. 
+type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// sleep is the equivalent of gax.Sleep without the need for the dependency. +func sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +func newRetryer() *metadataRetryer { + return &metadataRetryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +type metadataRetryer struct { + bo backoff + attempts int +} + +func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { + if status == http.StatusOK { + return 0, false + } + retryOk := shouldRetry(status, err) + if !retryOk { + return 0, false + } + if r.attempts == maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go similarity index 60% rename from vendor/google.golang.org/api/option/credentials_notgo19.go rename to vendor/cloud.google.com/go/compute/metadata/retry_linux.go index 74d3a4b5b..2e53f0123 100644 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,21 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.9 +//go:build linux +// +build linux -package option +package metadata import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" + "errors" + "syscall" ) -type withCreds google.DefaultCredentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.DefaultCredentials)(w) -} - -func WithCredentials(creds *google.DefaultCredentials) ClientOption { - return (*withCreds)(creds) +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. 
+ syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 000000000..e0704fa64 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go similarity index 57% rename from vendor/google.golang.org/api/option/credentials_go19.go rename to vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go index 0636a8294..74689acbb 100644 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,22 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.9 +//go:build linux -package option +package metadata import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" + "os" + "strings" ) -type withCreds google.Credentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.Credentials)(w) -} - -// WithCredentials returns a ClientOption that authenticates API calls. -func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" } diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 000000000..c0ce62787 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/cloud.google.com/go/debug.md b/vendor/cloud.google.com/go/debug.md new file mode 100644 index 000000000..2010ed7a6 --- /dev/null +++ b/vendor/cloud.google.com/go/debug.md @@ -0,0 +1,398 @@ +# Logging, Debugging and Telemetry + +**Warning: The OpenCensus project is obsolete and was archived on July 31st, +2023.** This means that any security vulnerabilities that are found will not be +patched. We recommend that you migrate from OpenCensus tracing to +OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for +details. + +Logging, debugging and telemetry all capture data that can be used for +troubleshooting. Logging records specific events and transactions. Debugging +exposes values for immediate analysis. Telemetry is suitable for production use +and can serve both logging and monitoring purposes. Telemetry tracing follows +requests through a system to provide a view of component interactions. Telemetry +metrics collects data for significant performance indicators, offering insights +into a system's health. + +## Logging and debugging + +While working with the Go Client Libraries you may run into some situations +where you need a deeper level of understanding about what is going on in order +to solve your problem. Here are some tips and tricks that you can use in these +cases. *Note* that many of the tips in this section will have a performance +impact and are therefore not recommended for sustained production use. Use these +tips locally or in production for a *limited time* to help get a better +understanding of what is going on. + +### HTTP based clients + +All of our auto-generated clients have a constructor to create a client that +uses HTTP/JSON instead of gRPC. Additionally a couple of our hand-written +clients like Storage and Bigquery are also HTTP based. Here are some tips for +debugging these clients. + +#### Try setting Go's HTTP debug variable + +Try setting the following environment variable for verbose Go HTTP logging: +GODEBUG=http2debug=1. To read more about this feature please see the godoc for +[net/http](https://pkg.go.dev/net/http). + +*WARNING*: Enabling this debug variable will log headers and payloads which may +contain private information. + +#### Add in your own logging with an HTTP middleware + +You may want to add in your own logging around HTTP requests. One way to do this +is to register a custom HTTP client with a logging transport built in. Here is +an example of how you would do this with the storage client. + +*WARNING*: Adding this middleware will log headers and payloads which may +contain private information. 
+ +```go +package main + +import ( + "context" + "fmt" + "log" + "net/http" + "net/http/httputil" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +type loggingRoundTripper struct { + rt http.RoundTripper +} + +func (d loggingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + // Will create a dump of the request and body. + dump, err := httputil.DumpRequest(r, true) + if err != nil { + log.Println("error dumping request") + } + log.Printf("%s", dump) + return d.rt.RoundTrip(r) +} + +func main() { + ctx := context.Background() + + // Create a transport with authentication built-in detected with + // [ADC](https://google.aip.dev/auth/4110). Note you will have to pass any + // required scoped for the client you are using. + trans, err := htransport.NewTransport(ctx, + http.DefaultTransport, + option.WithScopes(storage.ScopeFullControl), + ) + if err != nil { + log.Fatal(err) + } + + // Embed customized transport into an HTTP client. + hc := &http.Client{ + Transport: loggingRoundTripper{rt: trans}, + } + + // Supply custom HTTP client for use by the library. + client, err := storage.NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + log.Fatal(err) + } + defer client.Close() + // Use the client +} +``` + +### gRPC based clients + +#### Try setting grpc-go's debug variables + +Try setting the following environment variables for grpc-go: +`GRPC_GO_LOG_VERBOSITY_LEVEL=99` `GRPC_GO_LOG_SEVERITY_LEVEL=info`. These are +good for diagnosing connection level failures. For more information please see +[grpc-go's debug documentation](https://pkg.go.dev/google.golang.org/grpc/examples/features/debugging#section-readme). + +#### Add in your own logging with a gRPC interceptors + +You may want to add in your own logging around gRPC requests. One way to do this +is to register a custom interceptor that adds logging. Here is +an example of how you would do this with the secretmanager client. Note this +example registers a UnaryClientInterceptor but you may want/need to register +a StreamClientInterceptor instead-of/as-well depending on what kinds of +RPCs you are calling. + +*WARNING*: Adding this interceptor will log metadata and payloads which may +contain private information. + +```go +package main + +import ( + "context" + "log" + + secretmanager "cloud.google.com/go/secretmanager/apiv1" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func loggingUnaryInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + err := invoker(ctx, method, req, reply, cc, opts...) + log.Printf("Invoked method: %v", method) + md, ok := metadata.FromOutgoingContext(ctx) + if ok { + log.Println("Metadata:") + for k, v := range md { + log.Printf("Key: %v, Value: %v", k, v) + } + } + reqb, merr := protojson.Marshal(req.(protoreflect.ProtoMessage)) + if merr == nil { + log.Printf("Request: %s", reqb) + } + return err + } +} + +func main() { + ctx := context.Background() + // Supply custom gRPC interceptor for use by the client. 
+ client, err := secretmanager.NewClient(ctx, + option.WithGRPCDialOption(grpc.WithUnaryInterceptor(loggingUnaryInterceptor())), + ) + if err != nil { + log.Fatal(err) + } + defer client.Close() + // Use the client +} +``` + +## Telemetry + +**Warning: The OpenCensus project is obsolete and was archived on July 31st, +2023.** This means that any security vulnerabilities that are found will not be +patched. We recommend that you migrate from OpenCensus tracing to +OpenTelemetry, the successor project. The default experimental tracing support +for OpenCensus is now deprecated in the Google Cloud client libraries for Go. +See [OpenCensus](#opencensus) below for details. + +The Google Cloud client libraries for Go now use the +[OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) project by +default. Temporary opt-in support for OpenCensus is still available. The +transition from OpenCensus to OpenTelemetry is covered in the following +sections. + +### Tracing (experimental) + +Apart from spans created by underlying libraries such as gRPC, Google Cloud Go +generated clients do not create spans. Only the spans created by following +hand-written clients are in scope for the discussion in this section: + +* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery) +* [cloud.google.com/go/bigtable](https://pkg.go.dev/cloud.google.com/go/bigtable) +* [cloud.google.com/go/datastore](https://pkg.go.dev/cloud.google.com/go/datastore) +* [cloud.google.com/go/firestore](https://pkg.go.dev/cloud.google.com/go/firestore) +* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner) +* [cloud.google.com/go/storage](https://pkg.go.dev/cloud.google.com/go/storage) + +Currently, the spans created by these clients are for OpenTelemetry. OpenCensus +users are urged to transition to OpenTelemetry as soon as possible, as explained +in the next section. OpenCensus users can still opt-in to the deprecated +OpenCensus support via an environment variable, as described below. + +#### OpenCensus + +**Warning: The OpenCensus project is obsolete and was archived on July 31st, +2023.** This means that any security vulnerabilities that are found will not be +patched. We recommend that you migrate from OpenCensus tracing to +OpenTelemetry, the successor project. The default experimental tracing support +for OpenCensus is now deprecated in the Google Cloud client libraries for Go. + +Using the [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus), you can immediately begin exporting your traces with OpenTelemetry, even while +dependencies of your application remain instrumented with OpenCensus. If you do +not use the bridge, you will need to migrate your entire application and all of +its instrumented dependencies at once. For simple applications, this may be +possible, but we expect the bridge to be helpful if multiple libraries with +instrumentation are used. + +On May 29, 2024, six months after the +[release](https://github.com/googleapis/google-cloud-go/releases/tag/v0.111.0) +of experimental, opt-in support for OpenTelemetry tracing, the default tracing +support in the clients above was changed from OpenCensus to OpenTelemetry, and +the experimental OpenCensus support was marked as deprecated. To continue +using the OpenCensus support, set the environment variable +`GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING` to the case-insensitive +value `opencensus` before loading the client library. 
+ +```sh +export GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opencensus +``` + +On December 2nd, 2024, one year after the release of OpenTelemetry support, the +experimental and deprecated support for OpenCensus tracing will be removed. + +Please note that all Google Cloud Go clients currently provide experimental +support for the propagation of both OpenCensus and OpenTelemetry trace context +to their receiving endpoints. The experimental support for OpenCensus trace +context propagation will be removed at the same time as the experimental +OpenCensus tracing support. + +Please refer to the following resources: + +* [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/) +* [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus) + +#### OpenTelemetry + +The default experimental tracing support for OpenCensus is now deprecated in the +Google Cloud client libraries for Go. + +On May 29, 2024, the default experimental tracing support in the Google Cloud +client libraries for Go was changed from OpenCensus to OpenTelemetry. + +**Warning: OpenTelemetry-Go ensures +[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility) +with ONLY the current supported versions of the [Go +language](https://go.dev/doc/devel/release#policy). This support may be narrower +than the support that has been offered historically by the Go Client Libraries. +Ensure that your Go runtime version is supported by the OpenTelemetry-Go +[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility) +policy before enabling OpenTelemetry instrumentation.** + +Please refer to the following resources: + +* [What is OpenTelemetry?](https://opentelemetry.io/docs/what-is-opentelemetry/) +* [Cloud Trace - Go and OpenTelemetry](https://cloud.google.com/trace/docs/setup/go-ot) +* On GCE, [use Ops Agent and OpenTelemetry](https://cloud.google.com/trace/docs/otlp) + +##### Configuring the OpenTelemetry-Go - OpenCensus Bridge + +To configure the OpenCensus bridge with OpenTelemetry and Cloud Trace: + +```go +import ( + "context" + "log" + "os" + texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace" + octrace "go.opencensus.io/trace" + "go.opentelemetry.io/contrib/detectors/gcp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +func main() { + // Create exporter. + ctx := context.Background() + projectID := os.Getenv("GOOGLE_CLOUD_PROJECT") + exporter, err := texporter.New(texporter.WithProjectID(projectID)) + if err != nil { + log.Fatalf("texporter.New: %v", err) + } + // Identify your application using resource detection + res, err := resource.New(ctx, + // Use the GCP resource detector to detect information about the GCP platform + resource.WithDetectors(gcp.NewDetector()), + // Keep the default detectors + resource.WithTelemetrySDK(), + // Add your own custom attributes to identify your application + resource.WithAttributes( + semconv.ServiceNameKey.String("my-application"), + ), + ) + if err != nil { + log.Fatalf("resource.New: %v", err) + } + // Create trace provider with the exporter. + // + // By default it uses AlwaysSample() which samples all traces. + // In a production environment or high QPS setup please use + // probabilistic sampling. 
+ // Example: + // tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.0001)), ...) + tp := sdktrace.NewTracerProvider( + sdktrace.WithBatcher(exporter), + sdktrace.WithResource(res), + ) + defer tp.Shutdown(ctx) // flushes any pending spans, and closes connections. + otel.SetTracerProvider(tp) + tracer := otel.GetTracerProvider().Tracer("example.com/trace") + // Configure the OpenCensus tracer to use the bridge. + octrace.DefaultTracer = opencensus.NewTracer(tracer) + // Use otel tracer to create spans... +} + +``` + +##### Configuring context propagation + +In order to pass options to OpenTelemetry trace context propagation, follow the +appropriate example for the client's underlying transport. + +###### Passing options in HTTP-based clients + +```go +ctx := context.Background() +trans, err := htransport.NewTransport(ctx, + http.DefaultTransport, + option.WithScopes(storage.ScopeFullControl), +) +if err != nil { + log.Fatal(err) +} +// An example of passing options to the otelhttp.Transport. +otelOpts := otelhttp.WithFilter(func(r *http.Request) bool { + return r.URL.Path != "/ping" +}) +hc := &http.Client{ + Transport: otelhttp.NewTransport(trans, otelOpts), +} +client, err := storage.NewClient(ctx, option.WithHTTPClient(hc)) +``` + +Note that scopes must be set manually in this user-configured solution. + +###### Passing options in gRPC-based clients + +```go +projectID := "..." +ctx := context.Background() + +// An example of passing options to grpc.WithStatsHandler. +otelOpts := otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents) +dialOpts := grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelOpts)) + +ctx := context.Background() +c, err := datastore.NewClient(ctx, projectID, option.WithGRPCDialOption(dialOpts)) +if err != nil { + log.Fatal(err) +} +defer c.Close() +``` + +### Metrics (experimental) + +The generated clients do not create metrics. Only the following hand-written +clients create experimental OpenCensus metrics: + +* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery) +* [cloud.google.com/go/pubsub](https://pkg.go.dev/cloud.google.com/go/pubsub) +* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner) + +#### OpenTelemetry + +The transition of the experimental metrics in the clients above from OpenCensus +to OpenTelemetry is still TBD. \ No newline at end of file diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go new file mode 100644 index 000000000..133ff6855 --- /dev/null +++ b/vendor/cloud.google.com/go/doc.go @@ -0,0 +1,294 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package cloud is the root of the packages used to access Google Cloud +Services. See https://pkg.go.dev/cloud.google.com/go for a full list +of sub-modules. + +# Client Options + +All clients in sub-packages are configurable via client options. These options +are described here: https://pkg.go.dev/google.golang.org/api/option. 
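The "Client Options" note above is the hook through which credentials and endpoint configuration flow for every generated client. As a minimal sketch only — the credentials path and endpoint below are placeholders, and this is not code taken from sops or autograph — here is how options are passed to one of the clients newly vendored by this upgrade, the KMS KeyManagementClient:

```go
package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Client options are passed variadically to every generated constructor.
	// Both values below are placeholders for illustration.
	client, err := kms.NewKeyManagementClient(ctx,
		option.WithCredentialsFile("/path/to/service-account-key.json"),
		option.WithEndpoint("cloudkms.googleapis.com:443"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Use the client; Close (deferred above) releases its connection.
}
```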
+
+# Endpoint Override
+
+Endpoint configuration is used to specify the URL to which requests are
+sent. It is used for services that support or require regional endpoints, as
+well as for other use cases such as [testing against fake servers].
+
+For example, the Vertex AI service recommends that you configure the endpoint to
+the location with the features you want that is closest to your physical
+location or the location of your users. There is no global endpoint for Vertex
+AI. See [Vertex AI - Locations] for more details. The following example
+demonstrates configuring a Vertex AI client with a regional endpoint:
+
+ ctx := context.Background()
+ endpoint := "us-central1-aiplatform.googleapis.com:443"
+ client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint))
+
+# Authentication and Authorization
+
+All of the clients support authentication via [Google Application Default Credentials],
+or by providing a JSON key file for a Service Account. See examples below.
+
+Google Application Default Credentials (ADC) is the recommended way to authorize
+and authenticate clients. For information on how to create and obtain
+Application Default Credentials, see
+https://cloud.google.com/docs/authentication/production. If you have your
+environment configured correctly you will not need to pass any extra information
+to the client libraries. Here is an example of a client using ADC to
+authenticate:
+
+ client, err := secretmanager.NewClient(context.Background())
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+You can use a file with credentials to authenticate and authorize, such as a
+JSON key file associated with a Google service account. Service Account keys can
+be created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts.
+This example uses the Secret Manager client, but the same steps apply to all
+other client libraries in this package as well. Example:
+
+ client, err := secretmanager.NewClient(context.Background(),
+ option.WithCredentialsFile("/path/to/service-account-key.json"))
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+In some cases (for instance, you don't want to store secrets on disk), you can
+create credentials from in-memory JSON and use the WithCredentials option.
+This example uses the Secret Manager client, but the same steps apply to
+all other client libraries as well. Note that scopes can be
+found at https://developers.google.com/identity/protocols/oauth2/scopes, and
+are also provided in all auto-generated libraries: for example,
+cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
+
+ ctx := context.Background()
+ // https://pkg.go.dev/golang.org/x/oauth2/google
+ creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
+ if err != nil {
+ // TODO: handle error.
+ }
+ client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+# Timeouts and Cancellation
+
+By default, non-streaming methods, like Create or Get, will have a default
+deadline applied to the context provided at call time, unless a context deadline
+is already set. Streaming methods have no default deadline and will run
+indefinitely. To set timeouts or arrange for cancellation, use
+[context]. Transient errors will be retried when correctness allows.
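+
+Per-call retry behavior can also be adjusted with [github.com/googleapis/gax-go/v2]
+call options. The following is only a sketch; the status codes and backoff values
+are illustrative rather than the library defaults:
+
+ ctx := context.Background()
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ // Retry only on Unavailable, with exponential backoff between attempts.
+ if err := client.DeleteSecret(ctx, req, gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+ Initial: 200 * time.Millisecond,
+ Max: 30 * time.Second,
+ Multiplier: 2,
+ })
+ })); err != nil {
+ // TODO: handle error.
+ }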
+
+Here is an example of setting a timeout for an RPC using
+[context.WithTimeout]:
+
+ ctx := context.Background()
+ // Do not set a timeout on the context passed to NewClient: dialing happens
+ // asynchronously, and the context is used to refresh credentials in the
+ // background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+ // Time out if it takes more than 10 seconds to delete the secret.
+ tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel() // Always call cancel.
+
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ if err := client.DeleteSecret(tctx, req); err != nil {
+ // TODO: handle error.
+ }
+
+Here is an example of setting a timeout for an RPC using
+[github.com/googleapis/gax-go/v2.WithTimeout]:
+
+ ctx := context.Background()
+ // Do not set a timeout on the context passed to NewClient: dialing happens
+ // asynchronously, and the context is used to refresh credentials in the
+ // background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ // Time out if it takes more than 10 seconds to delete the secret.
+ if err := client.DeleteSecret(ctx, req, gax.WithTimeout(10*time.Second)); err != nil {
+ // TODO: handle error.
+ }
+
+Here is an example of arranging for an RPC to be canceled, using
+[context.WithCancel]:
+
+ ctx := context.Background()
+ // Do not cancel the context passed to NewClient: dialing happens asynchronously,
+ // and the context is used to refresh credentials in the background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel() // Always call cancel.
+
+ // TODO: Make the cancel function available to whatever might want to cancel the
+ // call--perhaps a GUI button.
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
+ if err := client.DeleteSecret(cctx, req); err != nil {
+ // TODO: handle error.
+ }
+
+Do not attempt to control the initial connection (dialing) of a service by
+setting a timeout on the context passed to NewClient. Dialing is non-blocking,
+so timeouts would be ineffective and would only interfere with credential
+refreshing, which uses the same context.
+
+# Headers
+
+Regardless of which transport is used, request headers can be set in the same
+way using [`callctx.SetHeaders`][setheaders].
+
+Here is a generic example:
+
+ // Set the header "key" to "value".
+ ctx := callctx.SetHeaders(context.Background(), "key", "value")
+
+ // Then use ctx in a subsequent request.
+ response, err := client.GetSecret(ctx, request)
+
+## Google-reserved headers
+
+There are some header keys that Google reserves for internal use and that must
+not be overwritten. The following header keys are broadly considered reserved
+and should not be conveyed by client library users unless instructed to do so:
+
+* `x-goog-api-client`
+* `x-goog-request-params`
+
+Be sure to check the individual package documentation for other service-specific
+reserved headers. For example, Storage supports a specific auditing header that
+is mentioned in that [module's documentation][storagedocs].
+
+## Google Cloud system parameters
+
+Google Cloud services respect [system parameters][system parameters] that can be
+used to augment request and/or response behavior. For the most part, they are
+not needed when using one of the enclosed client libraries. However, those that
+may be necessary are made available via the [`callctx`][callctx] package. If not
+present there, consider opening an issue on that repo to request a new constant.
+
+# Connection Pooling
+
+Connection pooling differs in clients based on their transport. Cloud
+clients either rely on HTTP or gRPC transports to communicate
+with Google Cloud.
+
+Cloud clients that use HTTP rely on the underlying HTTP transport to cache
+connections for later re-use. These are cached up to the http.MaxIdleConns
+and http.MaxIdleConnsPerHost settings in http.DefaultTransport by default.
+
+For gRPC clients, connection pooling is configurable. Users of Cloud Client
+Libraries may specify option.WithGRPCConnectionPool(n) as a client option to
+NewClient calls. This configures the underlying gRPC connections to be pooled
+and accessed in a round-robin fashion.
+
+# Using the Libraries in Container environments (Docker)
+
+Minimal container images like Alpine lack CA certificates. This causes RPCs to
+appear to hang, because gRPC retries indefinitely. See
+https://github.com/googleapis/google-cloud-go/issues/928 for more information.
+
+# Debugging
+
+For tips on how to debug code that calls into our libraries, check
+out our [Debugging Guide].
+
+# Testing
+
+For tips on how to write tests against code that calls into our libraries, check
+out our [Testing Guide].
+
+# Inspecting errors
+
+Most of the errors returned by the generated clients are wrapped in an
+[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped
+into a [google.golang.org/grpc/status.Status] or
+[google.golang.org/api/googleapi.Error] depending on the transport used to make
+the call (gRPC or REST). Converting your errors to these types can be a useful
+way to get more information about what went wrong while debugging.
+
+APIError gives access to specific details in the error. The transport-specific
+errors can still be unwrapped using the APIError.
+
+ if err != nil {
+ var ae *apierror.APIError
+ if errors.As(err, &ae) {
+ log.Println(ae.Reason())
+ log.Println(ae.Details().Help.GetLinks())
+ }
+ }
+
+If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can
+still be parsed using the [google.golang.org/grpc/status.FromError] function.
+
+ if err != nil {
+ if s, ok := status.FromError(err); ok {
+ log.Println(s.Message())
+ for _, d := range s.Proto().Details {
+ log.Println(d)
+ }
+ }
+ }
+
+# Client Stability
+
+Semver is used to communicate stability of the sub-modules of this package.
+Note that some stable sub-modules do contain packages, and sometimes features, that
+are considered unstable. If something is unstable it will be explicitly labeled
+as such. Example of package docs in an unstable package:
+
+ NOTE: This package is in beta. It is not stable, and may be subject to changes.
+
+Clients that contain alpha and beta in their import path may change or go away
+without notice.
+
+Clients marked stable will maintain compatibility with future versions for as
+long as we can reasonably sustain it. Incompatible changes might be made in some
+situations, including:
+
+ - Security bugs may prompt backwards-incompatible changes.
+ - Situations in which components are no longer feasible to maintain without
+ making breaking changes, including removal.
+ - Parts of the client surface may be outright unstable and subject to change.
+ These parts of the surface will be labeled with the note, "It is EXPERIMENTAL + and subject to change or removal without notice." + +[testing against fake servers]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes +[Vertex AI - Locations]: https://cloud.google.com/vertex-ai/docs/general/locations +[Google Application Default Credentials]: https://cloud.google.com/docs/authentication/external/set-up-adc +[Testing Guide]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md +[Debugging Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md +[callctx]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#pkg-constants +[setheaders]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#SetHeaders +[storagedocs]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Sending_Custom_Headers +[system parameters]: https://cloud.google.com/apis/docs/system-parameters +*/ +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/go.work b/vendor/cloud.google.com/go/go.work new file mode 100644 index 000000000..8f27dc5b1 --- /dev/null +++ b/vendor/cloud.google.com/go/go.work @@ -0,0 +1,181 @@ +go 1.20 + +use ( + . + ./accessapproval + ./accesscontextmanager + ./advisorynotifications + ./ai + ./aiplatform + ./alloydb + ./analytics + ./apigateway + ./apigeeconnect + ./apigeeregistry + ./apikeys + ./appengine + ./apphub + ./apps + ./area120 + ./artifactregistry + ./asset + ./assuredworkloads + ./auth + ./auth/oauth2adapt + ./automl + ./backupdr + ./baremetalsolution + ./batch + ./beyondcorp + ./bigquery + ./bigtable + ./billing + ./binaryauthorization + ./certificatemanager + ./channel + ./chat + ./cloudbuild + ./cloudcontrolspartner + ./clouddms + ./cloudprofiler + ./cloudquotas + ./cloudtasks + ./commerce + ./compute + ./compute/metadata + ./confidentialcomputing + ./config + ./contactcenterinsights + ./container + ./containeranalysis + ./datacatalog + ./dataflow + ./dataform + ./datafusion + ./datalabeling + ./dataplex + ./dataproc + ./dataqna + ./datastore + ./datastream + ./deploy + ./developerconnect + ./dialogflow + ./discoveryengine + ./dlp + ./documentai + ./domains + ./edgecontainer + ./edgenetwork + ./errorreporting + ./essentialcontacts + ./eventarc + ./filestore + ./firestore + ./functions + ./gkebackup + ./gkeconnect + ./gkehub + ./gkemulticloud + ./grafeas + ./gsuiteaddons + ./iam + ./iap + ./identitytoolkit + ./ids + ./internal/actions + ./internal/aliasfix + ./internal/aliasgen + ./internal/carver + ./internal/examples/fake + ./internal/examples/mock + ./internal/gapicgen + ./internal/generated/snippets + ./internal/godocfx + ./internal/postprocessor + ./internal/protoveneer + ./iot + ./kms + ./language + ./lifesciences + ./logging + ./longrunning + ./managedidentities + ./managedkafka + ./maps + ./mediatranslation + ./memcache + ./metastore + ./migrationcenter + ./monitoring + ./netapp + ./networkconnectivity + ./networkmanagement + ./networksecurity + ./networkservices + ./notebooks + ./optimization + ./orchestration + ./orgpolicy + ./osconfig + ./oslogin + ./parallelstore + ./phishingprotection + ./policysimulator + ./policytroubleshooter + ./privatecatalog + ./privilegedaccessmanager + ./profiler + ./pubsub + ./pubsublite + ./rapidmigrationassessment + ./recaptchaenterprise + ./recommendationengine + ./recommender + ./redis + ./resourcemanager + ./resourcesettings + ./retail + ./run + ./scheduler + ./secretmanager + ./securesourcemanager + ./security + 
./securitycenter + ./securitycentermanagement + ./securityposture + ./servicecontrol + ./servicedirectory + ./servicehealth + ./servicemanagement + ./serviceusage + ./shell + ./shopping + ./spanner + ./spanner/test/opentelemetry/test + ./speech + ./storage + ./storage/internal/benchmarks + ./storageinsights + ./storagetransfer + ./streetview + ./support + ./talent + ./telcoautomation + ./texttospeech + ./tpu + ./trace + ./translate + ./vertexai + ./video + ./videointelligence + ./vision + ./visionai + ./vmmigration + ./vmwareengine + ./vpcaccess + ./webrisk + ./websecurityscanner + ./workflows + ./workstations +) diff --git a/vendor/cloud.google.com/go/go.work.sum b/vendor/cloud.google.com/go/go.work.sum new file mode 100644 index 000000000..9c215e269 --- /dev/null +++ b/vendor/cloud.google.com/go/go.work.sum @@ -0,0 +1,91 @@ +cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU= +cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k= +cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU= +cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.46.0/go.mod h1:V28hx+cUCZC9e3qcqszMb+Sbt8cQZtHTiXOmyDzoDOg= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= +github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= +github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3/go.mod h1:BrAJyOMrnwzYVQcP5ziqlCpnEuFfkNppZLzqDyW/YTg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4= +github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0/go.mod h1:pMYMxVaKJqCDC1JUg/XbPJ4/fSazB25zORpFzqsIGIc= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/itchyny/gojq v0.12.9/go.mod h1:T4Ip7AETUXeGpD+436m+UEl3m3tokRgajd5pRfsR5oE= +github.com/itchyny/timefmt-go v0.1.4/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/miekg/dns v1.1.33/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/bridge/opencensus v0.40.0 h1:pqDiayRhBgoqy1vwnscik+TizcImJ58l053NScJyZso= +go.opentelemetry.io/otel/bridge/opencensus v0.40.0/go.mod h1:1NvVHb6tLTe5A9qCYz+eErW0t8iPn4ZfR6tDKcqlGTM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg= +google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= +google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= +google.golang.org/genproto v0.0.0-20230725213213-b022f6e96895/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto/googleapis/api v0.0.0-20230725213213-b022f6e96895/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20240102182953-50ed04b92917/go.mod h1:O9TvT7A9NLgdqqF0JJXJ+axpaoYiEb8txGmkvy+AvLc= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20240513163218-0867130af1f8/go.mod h1:RCpt0+3mpEDPldc32vXBM8ADXlFL95T8Chxx0nv0/zE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md new file mode 100644 index 000000000..498a15a5f --- /dev/null +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -0,0 +1,224 @@ +# Changes + + +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) + + +### Features + +* 
**iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + +## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + +## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24) + + +### Bug Fixes + +* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26) + + +### Bug Fixes + +* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + +## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01) + + +### Bug Fixes + +* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14) + + +### Bug Fixes + +* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30) + + +### Bug Fixes + +* **iam:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698)) + +## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26) + + +### Bug Fixes + +* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) + + +### Documentation + +* **iam:** Minor formatting ([b4349cc](https://github.com/googleapis/google-cloud-go/commit/b4349cc507870ff8629bbc07de578b63bb889626)) + +## 
[1.1.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.0...iam/v1.1.1) (2023-06-20) + + +### Bug Fixes + +* **iam:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) + +## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) + + +### Features + +* **iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) + + +### Bug Fixes + +* **iam:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) + + +### Features + +* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) + + +### Features + +* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) + + +### Features + +* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) + + +### Features + +* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) + + +### Features + +* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) + + +### Features + +* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) + + +### Features + +* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) + +## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) + + +### Features + +* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) + + +### Features + +* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) + +## 
[0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) + + +### Features + +* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) + + +### Features + +* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) + + +### Features + +* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + +## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) + + +### Features + +* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) + +### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) + + +### Bug Fixes + +* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) + +## v0.1.0 + +This is the first tag to carve out iam as its own module. See +[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository). diff --git a/vendor/cloud.google.com/go/iam/LICENSE b/vendor/cloud.google.com/go/iam/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/iam/README.md b/vendor/cloud.google.com/go/iam/README.md new file mode 100644 index 000000000..0072cc9e2 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/README.md @@ -0,0 +1,40 @@ +# IAM API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam) + +Go Client Library for IAM API. + +## Install + +```bash +go get cloud.google.com/go/iam +``` + +## Stability + +The stability of this module is indicated by SemVer. + +However, a `v1+` module may have breaking changes in two scenarios: + +* Packages with `alpha` or `beta` in the import path +* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature). + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. 
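+
+## Example
+
+Most callers reach this package through IAM handles exposed by other clients
+rather than by constructing its types directly. The following is only a sketch,
+assuming the Cloud Storage client; the bucket name, member, and role are
+placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"cloud.google.com/go/iam"
+	"cloud.google.com/go/storage"
+)
+
+func main() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	// Read the bucket's current IAM policy through its iam.Handle.
+	handle := client.Bucket("my-bucket").IAM()
+	policy, err := handle.Policy(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Grant a role to a member and write the updated policy back.
+	policy.Add("user:alice@example.com", iam.RoleName("roles/storage.objectViewer"))
+	if err := handle.SetPolicy(ctx, policy); err != nil {
+		log.Fatal(err)
+	}
+}
+```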
+ +## Authorization + +See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go new file mode 100644 index 000000000..619b4c4fa --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -0,0 +1,672 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/iam/v1/iam_policy.proto + +package iampb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy is being specified. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // REQUIRED: The complete policy to be applied to the `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as Projects) + // might reject them. + Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` + // OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only + // the fields in the mask will be modified. 
If no mask is provided, the + // following default mask is used: + // + // `paths: "bindings, etag"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *SetIamPolicyRequest) Reset() { + *x = SetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetIamPolicyRequest) ProtoMessage() {} + +func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *SetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *SetIamPolicyRequest) GetPolicy() *Policy { + if x != nil { + return x.Policy + } + return nil +} + +func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy is being requested. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // OPTIONAL: A `GetPolicyOptions` object for specifying options to + // `GetIamPolicy`. + Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *GetIamPolicyRequest) Reset() { + *x = GetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetIamPolicyRequest) ProtoMessage() {} + +func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *GetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { + if x != nil { + return x.Options + } + return nil +} + +// Request message for `TestIamPermissions` method. 
+type TestIamPermissionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // REQUIRED: The resource for which the policy detail is being requested. + // See the operation documentation for the appropriate value for this field. + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The set of permissions to check for the `resource`. Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For more + // information see + // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` +} + +func (x *TestIamPermissionsRequest) Reset() { + *x = TestIamPermissionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestIamPermissionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestIamPermissionsRequest) ProtoMessage() {} + +func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} +} + +func (x *TestIamPermissionsRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *TestIamPermissionsRequest) GetPermissions() []string { + if x != nil { + return x.Permissions + } + return nil +} + +// Response message for `TestIamPermissions` method. +type TestIamPermissionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A subset of `TestPermissionsRequest.permissions` that the caller is + // allowed. + Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` +} + +func (x *TestIamPermissionsResponse) Reset() { + *x = TestIamPermissionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestIamPermissionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestIamPermissionsResponse) ProtoMessage() {} + +func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. 
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *TestIamPermissionsResponse) GetPermissions() []string { + if x != nil { + return x.Permissions + } + return nil +} + +var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, + 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, + 0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, + 0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, + 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 
0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc +) + +func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) + }) + return file_google_iam_v1_iam_policy_proto_rawDescData +} + +var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_iam_v1_iam_policy_proto_goTypes = []any{ + (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest + (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest + (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest + (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse + (*Policy)(nil), // 4: google.iam.v1.Policy + (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask + (*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy + 5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask + 6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions + 0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy + 4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy + 3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func 
init() { file_google_iam_v1_iam_policy_proto_init() } +func file_google_iam_v1_iam_policy_proto_init() { + if File_google_iam_v1_iam_policy_proto != nil { + return + } + file_google_iam_v1_options_proto_init() + file_google_iam_v1_policy_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*SetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*TestIamPermissionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*TestIamPermissionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, + MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_iam_policy_proto = out.File + file_google_iam_v1_iam_policy_proto_rawDesc = nil + file_google_iam_v1_iam_policy_proto_goTypes = nil + file_google_iam_v1_iam_policy_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// IAMPolicyClient is the client API for IAMPolicy service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IAMPolicyClient interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. + SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a `NOT_FOUND` error. + // + // Note: This operation is designed to be used for building permission-aware + // UIs and command-line tools, not for authorization checking. This operation + // may "fail open" without warning. 
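The comments above describe the full google.iam.v1.IAMPolicy surface that this vendored file generates clients for. Below is a minimal sketch of calling TestIamPermissions through that generated client; it is illustrative only and not part of the vendored file. The package name, helper name, and permission string are hypothetical, and obtaining an authenticated grpc.ClientConnInterface to a service that serves this API is assumed rather than shown.

```
// Illustrative sketch only; not part of the vendored file.
package iamexample

import (
	"context"

	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/grpc"
)

// listHeldPermissions asks which of the probed permissions the caller holds on
// resource. The connection is assumed to target a service implementing
// google.iam.v1.IAMPolicy; the permission string is a hypothetical example.
func listHeldPermissions(ctx context.Context, conn grpc.ClientConnInterface, resource string) ([]string, error) {
	client := iampb.NewIAMPolicyClient(conn)
	resp, err := client.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    resource,
		Permissions: []string{"cloudkms.cryptoKeys.get"}, // hypothetical permission to probe for
	})
	if err != nil {
		return nil, err
	}
	// Per the comment above, a missing resource yields an empty permission
	// list rather than a NOT_FOUND error.
	return resp.GetPermissions(), nil
}
```

As the generated comment notes, this RPC is meant for permission-aware tooling, not for authorization checks, and an empty result is not by itself an error.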
+ TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) +} + +type iAMPolicyClient struct { + cc grpc.ClientConnInterface +} + +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { + return &iAMPolicyClient{cc} +} + +func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { + out := new(Policy) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { + out := new(TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IAMPolicyServer is the server API for IAMPolicy service. +type IAMPolicyServer interface { + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + // + // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. + SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a `NOT_FOUND` error. + // + // Note: This operation is designed to be used for building permission-aware + // UIs and command-line tools, not for authorization checking. This operation + // may "fail open" without warning. + TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) +} + +// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. 
+type UnimplementedIAMPolicyServer struct { +} + +func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} + +func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { + s.RegisterService(&_IAMPolicy_serviceDesc, srv) +} + +func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.iam.v1.IAMPolicy", + HandlerType: (*IAMPolicyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetIamPolicy", + Handler: _IAMPolicy_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _IAMPolicy_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _IAMPolicy_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/iam/v1/iam_policy.proto", +} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go new file mode 100644 index 000000000..f1c1c084e --- /dev/null +++ 
b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -0,0 +1,187 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/iam/v1/options.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Encapsulates settings provided to GetIamPolicy. +type GetPolicyOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The maximum policy version that will be used to format the + // policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Requests for policies with any conditional role bindings must specify + // version 3. Policies with no conditional role bindings may specify any valid + // value or leave the field unset. + // + // The policy in the response might use the policy version that you specified, + // or it might use a lower policy version. For example, if you specify version + // 3, but the policy has no conditional role bindings, the response uses + // version 1. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` +} + +func (x *GetPolicyOptions) Reset() { + *x = GetPolicyOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPolicyOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPolicyOptions) ProtoMessage() {} + +func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. 
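GetPolicyOptions is attached to the GetIamPolicyRequest defined in iam_policy.pb.go above. A hedged sketch of requesting version 3, which the comment above says is required when a policy may contain conditional role bindings, follows; it is illustrative only, and the helper name is hypothetical.

```
// Illustrative sketch only; not part of the vendored file.
package iamexample

import (
	"context"

	"cloud.google.com/go/iam/apiv1/iampb"
)

// fetchPolicyAtV3 requests policy version 3 so that, per the comment above,
// any conditional role bindings are returned intact.
func fetchPolicyAtV3(ctx context.Context, client iampb.IAMPolicyClient, resource string) (*iampb.Policy, error) {
	return client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: resource,
		Options:  &iampb.GetPolicyOptions{RequestedPolicyVersion: 3},
	})
}
```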
+func (*GetPolicyOptions) Descriptor() ([]byte, []int) { + return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} +} + +func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { + if x != nil { + return x.RequestedPolicyVersion + } + return 0 +} + +var File_google_iam_v1_options_proto protoreflect.FileDescriptor + +var file_google_iam_v1_options_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_iam_v1_options_proto_rawDescOnce sync.Once + file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc +) + +func file_google_iam_v1_options_proto_rawDescGZIP() []byte { + file_google_iam_v1_options_proto_rawDescOnce.Do(func() { + file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) + }) + return file_google_iam_v1_options_proto_rawDescData +} + +var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_iam_v1_options_proto_goTypes = []any{ + (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_options_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_options_proto_init() } +func file_google_iam_v1_options_proto_init() { + if File_google_iam_v1_options_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GetPolicyOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: file_google_iam_v1_options_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_options_proto_goTypes, + DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, + MessageInfos: file_google_iam_v1_options_proto_msgTypes, + }.Build() + File_google_iam_v1_options_proto = out.File + file_google_iam_v1_options_proto_rawDesc = nil + file_google_iam_v1_options_proto_goTypes = nil + file_google_iam_v1_options_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go new file mode 100644 index 000000000..4dda5d6d0 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -0,0 +1,1180 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/iam/v1/policy.proto + +package iampb + +import ( + reflect "reflect" + sync "sync" + + expr "google.golang.org/genproto/googleapis/type/expr" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The list of valid permission types for which logging can be configured. +// Admin writes are always logged, and are not configurable. +type AuditLogConfig_LogType int32 + +const ( + // Default case. Should never be this. + AuditLogConfig_LOG_TYPE_UNSPECIFIED AuditLogConfig_LogType = 0 + // Admin reads. Example: CloudIAM getIamPolicy + AuditLogConfig_ADMIN_READ AuditLogConfig_LogType = 1 + // Data writes. Example: CloudSQL Users create + AuditLogConfig_DATA_WRITE AuditLogConfig_LogType = 2 + // Data reads. Example: CloudSQL Users list + AuditLogConfig_DATA_READ AuditLogConfig_LogType = 3 +) + +// Enum value maps for AuditLogConfig_LogType. 
+var ( + AuditLogConfig_LogType_name = map[int32]string{ + 0: "LOG_TYPE_UNSPECIFIED", + 1: "ADMIN_READ", + 2: "DATA_WRITE", + 3: "DATA_READ", + } + AuditLogConfig_LogType_value = map[string]int32{ + "LOG_TYPE_UNSPECIFIED": 0, + "ADMIN_READ": 1, + "DATA_WRITE": 2, + "DATA_READ": 3, + } +) + +func (x AuditLogConfig_LogType) Enum() *AuditLogConfig_LogType { + p := new(AuditLogConfig_LogType) + *p = x + return p +} + +func (x AuditLogConfig_LogType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditLogConfig_LogType) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() +} + +func (AuditLogConfig_LogType) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[0] +} + +func (x AuditLogConfig_LogType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditLogConfig_LogType.Descriptor instead. +func (AuditLogConfig_LogType) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} +} + +// The type of action performed on a Binding in a policy. +type BindingDelta_Action int32 + +const ( + // Unspecified. + BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 + // Addition of a Binding. + BindingDelta_ADD BindingDelta_Action = 1 + // Removal of a Binding. + BindingDelta_REMOVE BindingDelta_Action = 2 +) + +// Enum value maps for BindingDelta_Action. +var ( + BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x BindingDelta_Action) Enum() *BindingDelta_Action { + p := new(BindingDelta_Action) + *p = x + return p +} + +func (x BindingDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() +} + +func (BindingDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[1] +} + +func (x BindingDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BindingDelta_Action.Descriptor instead. +func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5, 0} +} + +// The type of action performed on an audit configuration in a policy. +type AuditConfigDelta_Action int32 + +const ( + // Unspecified. + AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 + // Addition of an audit configuration. + AuditConfigDelta_ADD AuditConfigDelta_Action = 1 + // Removal of an audit configuration. + AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 +) + +// Enum value maps for AuditConfigDelta_Action. 
+var ( + AuditConfigDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + AuditConfigDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) + +func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { + p := new(AuditConfigDelta_Action) + *p = x + return p +} + +func (x AuditConfigDelta_Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[2].Descriptor() +} + +func (AuditConfigDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[2] +} + +func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. +func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6, 0} +} + +// An Identity and Access Management (IAM) policy, which specifies access +// controls for Google Cloud resources. +// +// A `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members`, or principals, to a single `role`. Principals can be user +// accounts, service accounts, Google groups, and domains (such as G Suite). A +// `role` is a named list of permissions; each `role` can be an IAM predefined +// role or a user-created custom role. +// +// For some types of Google Cloud resources, a `binding` can also specify a +// `condition`, which is a logical expression that allows access to a resource +// only if the expression evaluates to `true`. A condition can add constraints +// based on attributes of the request, the resource, or both. To learn which +// resources support conditions in their IAM policies, see the +// [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). +// +// **JSON example:** +// +// ``` +// +// { +// "bindings": [ +// { +// "role": "roles/resourcemanager.organizationAdmin", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" +// ] +// }, +// { +// "role": "roles/resourcemanager.organizationViewer", +// "members": [ +// "user:eve@example.com" +// ], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", +// } +// } +// ], +// "etag": "BwWWja0YfJA=", +// "version": 3 +// } +// +// ``` +// +// **YAML example:** +// +// ``` +// +// bindings: +// - members: +// - user:mike@example.com +// - group:admins@example.com +// - domain:google.com +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin +// - members: +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') +// etag: BwWWja0YfJA= +// version: 3 +// +// ``` +// +// For a description of IAM and its features, see the +// [IAM documentation](https://cloud.google.com/iam/docs/). 
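Expressed with the generated Go types in this package, the JSON/YAML policy in the comment above looks roughly like the sketch below. It is illustrative only and not part of the vendored file; the function name is hypothetical and the principals are the same example values used in the comment.

```
// Illustrative sketch only; not part of the vendored file.
package iamexample

import (
	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/genproto/googleapis/type/expr"
)

// examplePolicy mirrors the documented example: one unconditional binding plus
// one binding guarded by an expiry condition. Version is 3 because a
// conditional role binding is present.
func examplePolicy() *iampb.Policy {
	return &iampb.Policy{
		Version: 3,
		Bindings: []*iampb.Binding{
			{
				Role: "roles/resourcemanager.organizationAdmin",
				Members: []string{
					"user:mike@example.com",
					"group:admins@example.com",
					"domain:google.com",
					"serviceAccount:my-project-id@appspot.gserviceaccount.com",
				},
			},
			{
				Role:    "roles/resourcemanager.organizationViewer",
				Members: []string{"user:eve@example.com"},
				Condition: &expr.Expr{
					Title:       "expirable access",
					Description: "Does not grant access after Sep 2020",
					Expression:  "request.time < timestamp('2020-10-01T00:00:00.000Z')",
				},
			},
		},
		// Etag intentionally left unset: in a read-modify-write update it is
		// carried over from a prior GetIamPolicy response, as the field
		// comments below describe.
	}
}
```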
+type Policy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the format of the policy. + // + // Valid values are `0`, `1`, and `3`. Requests that specify an invalid value + // are rejected. + // + // Any operation that affects conditional role bindings must specify version + // `3`. This requirement applies to the following operations: + // + // - Getting a policy that includes a conditional role binding + // - Adding a conditional role binding to a policy + // - Changing a conditional role binding in a policy + // - Removing any role binding, with or without a condition, from a policy + // that includes conditions + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. + // + // If a policy does not include any conditions, operations on that policy may + // specify any valid version or leave the field unset. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Associates a list of `members`, or principals, with a `role`. Optionally, + // may specify a `condition` that determines how and when the `bindings` are + // applied. Each of the `bindings` must contain at least one principal. + // + // The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 + // of these principals can be Google groups. Each occurrence of a principal + // counts towards these limits. For example, if the `bindings` grant 50 + // different roles to `user:alice@example.com`, and not to any other + // principal, then you can add another 1,450 principals to the `bindings` in + // the `Policy`. + Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` + // Specifies cloud audit logging configuration for this policy. + AuditConfigs []*AuditConfig `protobuf:"bytes,6,rep,name=audit_configs,json=auditConfigs,proto3" json:"audit_configs,omitempty"` + // `etag` is used for optimistic concurrency control as a way to help + // prevent simultaneous updates of a policy from overwriting each other. + // It is strongly suggested that systems make use of the `etag` in the + // read-modify-write cycle to perform policy updates in order to avoid race + // conditions: An `etag` is returned in the response to `getIamPolicy`, and + // systems are expected to put that etag in the request to `setIamPolicy` to + // ensure that their change will be applied to the same version of the policy. + // + // **Important:** If you use IAM Conditions, you must include the `etag` field + // whenever you call `setIamPolicy`. If you omit this field, then IAM allows + // you to overwrite a version `3` policy with a version `1` policy, and all of + // the conditions in the version `3` policy are lost. 
+ Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} +} + +func (x *Policy) GetVersion() int32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Policy) GetBindings() []*Binding { + if x != nil { + return x.Bindings + } + return nil +} + +func (x *Policy) GetAuditConfigs() []*AuditConfig { + if x != nil { + return x.AuditConfigs + } + return nil +} + +func (x *Policy) GetEtag() []byte { + if x != nil { + return x.Etag + } + return nil +} + +// Associates `members`, or principals, with a `role`. +type Binding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Role that is assigned to the list of `members`, or principals. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // Specifies the principals requesting access for a Google Cloud resource. + // `members` can have the following values: + // + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . + // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. + // + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. + // + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. + // + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. + // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. + Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + // The condition that is associated with this binding. + // + // If the condition evaluates to `true`, then this binding applies to the + // current request. + // + // If the condition evaluates to `false`, then this binding does not apply to + // the current request. However, a different role binding might grant the same + // role to one or more of the principals in this binding. + // + // To learn which resources support conditions in their IAM policies, see the + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Binding) Reset() { + *x = Binding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Binding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Binding) ProtoMessage() {} + +func (x *Binding) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Binding.ProtoReflect.Descriptor instead. +func (*Binding) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} +} + +func (x *Binding) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *Binding) GetMembers() []string { + if x != nil { + return x.Members + } + return nil +} + +func (x *Binding) GetCondition() *expr.Expr { + if x != nil { + return x.Condition + } + return nil +} + +// Specifies the audit configuration for a service. +// The configuration determines which permission types are logged, and what +// identities, if any, are exempted from logging. +// An AuditConfig must have one or more AuditLogConfigs. +// +// If there are AuditConfigs for both `allServices` and a specific service, +// the union of the two AuditConfigs is used for that service: the log_types +// specified in each AuditConfig are enabled, and the exempted_members in each +// AuditLogConfig are exempted. +// +// Example Policy with multiple AuditConfigs: +// +// { +// "audit_configs": [ +// { +// "service": "allServices", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// }, +// { +// "log_type": "ADMIN_READ" +// } +// ] +// }, +// { +// "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ" +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:aliya@example.com" +// ] +// } +// ] +// } +// ] +// } +// +// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ +// logging. It also exempts `jose@example.com` from DATA_READ logging, and +// `aliya@example.com` from DATA_WRITE logging. 
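For the audit-logging half of the example above, the `allServices` entry maps onto these generated types as shown in this sketch; it is illustrative only and not part of the vendored file, and the helper name is hypothetical.

```
// Illustrative sketch only; not part of the vendored file.
package iamexample

import "cloud.google.com/go/iam/apiv1/iampb"

// allServicesAuditConfig mirrors the "allServices" entry in the example above:
// DATA_READ logging with one exempted member, plus DATA_WRITE and ADMIN_READ.
func allServicesAuditConfig() *iampb.AuditConfig {
	return &iampb.AuditConfig{
		Service: "allServices",
		AuditLogConfigs: []*iampb.AuditLogConfig{
			{
				LogType:         iampb.AuditLogConfig_DATA_READ,
				ExemptedMembers: []string{"user:jose@example.com"},
			},
			{LogType: iampb.AuditLogConfig_DATA_WRITE},
			{LogType: iampb.AuditLogConfig_ADMIN_READ},
		},
	}
}
```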
+type AuditConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies a service that will be enabled for audit logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // The configuration for logging of each type of permission. + AuditLogConfigs []*AuditLogConfig `protobuf:"bytes,3,rep,name=audit_log_configs,json=auditLogConfigs,proto3" json:"audit_log_configs,omitempty"` +} + +func (x *AuditConfig) Reset() { + *x = AuditConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditConfig) ProtoMessage() {} + +func (x *AuditConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditConfig.ProtoReflect.Descriptor instead. +func (*AuditConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} +} + +func (x *AuditConfig) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { + if x != nil { + return x.AuditLogConfigs + } + return nil +} + +// Provides the configuration for logging a type of permissions. +// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting +// jose@example.com from DATA_READ logging. +type AuditLogConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The log type that this config enables. + LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` + // Specifies the identities that do not cause logging for this type of + // permission. + // Follows the same format of + // [Binding.members][google.iam.v1.Binding.members]. + ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` +} + +func (x *AuditLogConfig) Reset() { + *x = AuditLogConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditLogConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditLogConfig) ProtoMessage() {} + +func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditLogConfig.ProtoReflect.Descriptor instead. 
+func (*AuditLogConfig) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} +} + +func (x *AuditLogConfig) GetLogType() AuditLogConfig_LogType { + if x != nil { + return x.LogType + } + return AuditLogConfig_LOG_TYPE_UNSPECIFIED +} + +func (x *AuditLogConfig) GetExemptedMembers() []string { + if x != nil { + return x.ExemptedMembers + } + return nil +} + +// The difference delta between two policies. +type PolicyDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The delta for Bindings between two policies. + BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` + // The delta for AuditConfigs between two policies. + AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` +} + +func (x *PolicyDelta) Reset() { + *x = PolicyDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PolicyDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PolicyDelta) ProtoMessage() {} + +func (x *PolicyDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} +} + +func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if x != nil { + return x.BindingDeltas + } + return nil +} + +func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { + if x != nil { + return x.AuditConfigDeltas + } + return nil +} + +// One delta entry for Binding. Each individual change (only one member in each +// entry) to a binding will be a separate entry. +type BindingDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action that was performed on a Binding. + // Required + Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` + // Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. + // Required + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + // A single identity requesting access for a Google Cloud resource. + // Follows the same format of Binding.members. + // Required + Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` + // The condition that is associated with this binding. 
+ Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *BindingDelta) Reset() { + *x = BindingDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BindingDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BindingDelta) ProtoMessage() {} + +func (x *BindingDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. +func (*BindingDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5} +} + +func (x *BindingDelta) GetAction() BindingDelta_Action { + if x != nil { + return x.Action + } + return BindingDelta_ACTION_UNSPECIFIED +} + +func (x *BindingDelta) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *BindingDelta) GetMember() string { + if x != nil { + return x.Member + } + return "" +} + +func (x *BindingDelta) GetCondition() *expr.Expr { + if x != nil { + return x.Condition + } + return nil +} + +// One delta entry for AuditConfig. Each individual change (only one +// exempted_member in each entry) to a AuditConfig will be a separate entry. +type AuditConfigDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action that was performed on an audit configuration in a policy. + // Required + Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` + // Specifies a service that was configured for Cloud Audit Logging. + // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + // Required + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + // A single identity that is exempted from "data access" audit + // logging for the `service` specified above. + // Follows the same format of Binding.members. + ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"` + // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always + // enabled, and cannot be configured. + // Required + LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` +} + +func (x *AuditConfigDelta) Reset() { + *x = AuditConfigDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuditConfigDelta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuditConfigDelta) ProtoMessage() {} + +func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
+func (*AuditConfigDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6} +} + +func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { + if x != nil { + return x.Action + } + return AuditConfigDelta_ACTION_UNSPECIFIED +} + +func (x *AuditConfigDelta) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *AuditConfigDelta) GetExemptedMember() string { + if x != nil { + return x.ExemptedMember + } + return "" +} + +func (x *AuditConfigDelta) GetLogType() string { + if x != nil { + return x.LogType + } + return "" +} + +var File_google_iam_v1_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x0d, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x22, 0x68, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x73, 0x22, + 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6c, 0x6f, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, + 0x52, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, + 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x5f, 0x52, 0x45, + 0x41, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x57, 0x52, 0x49, + 0x54, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x0c, 0x42, 0x69, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, + 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 
0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, + 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, 0x0a, 0x10, 0x41, 0x75, + 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3e, + 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x6d, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x35, 0x0a, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, + 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc +) + +func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) + }) + return file_google_iam_v1_policy_proto_rawDescData +} + +var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_google_iam_v1_policy_proto_goTypes = []any{ + (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType + (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action + (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action + (*Policy)(nil), // 3: google.iam.v1.Policy + (*Binding)(nil), // 4: google.iam.v1.Binding + (*AuditConfig)(nil), // 5: google.iam.v1.AuditConfig + (*AuditLogConfig)(nil), // 6: google.iam.v1.AuditLogConfig + (*PolicyDelta)(nil), // 7: 
google.iam.v1.PolicyDelta + (*BindingDelta)(nil), // 8: google.iam.v1.BindingDelta + (*AuditConfigDelta)(nil), // 9: google.iam.v1.AuditConfigDelta + (*expr.Expr)(nil), // 10: google.type.Expr +} +var file_google_iam_v1_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding + 5, // 1: google.iam.v1.Policy.audit_configs:type_name -> google.iam.v1.AuditConfig + 10, // 2: google.iam.v1.Binding.condition:type_name -> google.type.Expr + 6, // 3: google.iam.v1.AuditConfig.audit_log_configs:type_name -> google.iam.v1.AuditLogConfig + 0, // 4: google.iam.v1.AuditLogConfig.log_type:type_name -> google.iam.v1.AuditLogConfig.LogType + 8, // 5: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta + 9, // 6: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta + 1, // 7: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action + 10, // 8: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr + 2, // 9: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_policy_proto_init() } +func file_google_iam_v1_policy_proto_init() { + if File_google_iam_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Binding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*AuditConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*AuditLogConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*PolicyDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*BindingDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*AuditConfigDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, + NumEnums: 3, + 
NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, + EnumInfos: file_google_iam_v1_policy_proto_enumTypes, + MessageInfos: file_google_iam_v1_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_policy_proto = out.File + file_google_iam_v1_policy_proto_rawDesc = nil + file_google_iam_v1_policy_proto_goTypes = nil + file_google_iam_v1_policy_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 000000000..f004a7afb --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,387 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "context" + "fmt" + "time" + + pb "cloud.google.com/go/iam/apiv1/iampb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) + GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
+type grpcClient struct { + c pb.IAMPolicyClient +} + +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + return g.GetWithVersion(ctx, resource, 1) +} + +func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { + var proto *pb.Policy + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ + Resource: resource, + Options: &pb.GetPolicyOptions{ + RequestedPolicyVersion: requestedPolicyVersion, + }, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return proto, nil +} + +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + var res *pb.TestIamPermissionsResponse + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions). +type Handle3 struct { + c client + resource string + version int32 +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle { + return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) +} + +// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// grpc service that implements IAM as a mixin +func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: c}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. 
+func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// V3 returns a Handle3, which is like Handle except it sets +// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 +// when storing a policy. +func (h *Handle) V3() *Handle3 { + return &Handle3{ + c: h.c, + resource: h.resource, + version: 3, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. 
+func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the suppied role, or nil if there isn't one. +func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} + +// insertMetadata inserts metadata into the given context +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// A Policy3 is a list of Bindings representing roles granted to members. +// +// The zero Policy3 is a valid policy with no bindings. +// +// It is similar to a Policy, except a Policy3 provides direct access to the +// list of Bindings. +// +// The policy version is always set to 3. +type Policy3 struct { + etag []byte + Bindings []*pb.Binding +} + +// Policy retrieves the IAM policy for the resource. +// +// requestedPolicyVersion is always set to 3. +func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { + proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) + if err != nil { + return nil, err + } + return &Policy3{ + Bindings: proto.Bindings, + etag: proto.Etag, + }, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { + return h.c.Set(ctx, h.resource, &pb.Policy{ + Bindings: policy.Bindings, + Etag: policy.etag, + Version: h.version, + }) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. 
+func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json new file mode 100644 index 000000000..54acae7cd --- /dev/null +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -0,0 +1,2982 @@ +{ + "cloud.google.com/go/accessapproval/apiv1": { + "api_shortname": "accessapproval", + "distribution_name": "cloud.google.com/go/accessapproval/apiv1", + "description": "Access Approval API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/accesscontextmanager/apiv1": { + "api_shortname": "accesscontextmanager", + "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", + "description": "Access Context Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/advisorynotifications/apiv1": { + "api_shortname": "advisorynotifications", + "distribution_name": "cloud.google.com/go/advisorynotifications/apiv1", + "description": "Advisory Notifications API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/advisorynotifications/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1beta": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1beta2": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/aiplatform/apiv1": { + "api_shortname": "aiplatform", + "distribution_name": "cloud.google.com/go/aiplatform/apiv1", + "description": "Vertex AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/aiplatform/apiv1beta1": { + "api_shortname": "aiplatform", + "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", + "description": "Vertex AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1", + "description": "AlloyDB API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1alpha": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1alpha", + "description": "AlloyDB API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/apiv1beta": { + "api_shortname": "alloydb", + "distribution_name": "cloud.google.com/go/alloydb/apiv1beta", + "description": "AlloyDB API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1alpha": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1beta": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/analytics/admin/apiv1alpha": { + "api_shortname": "analyticsadmin", + "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", + "description": "Google Analytics Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apigateway/apiv1": { + "api_shortname": "apigateway", + "distribution_name": "cloud.google.com/go/apigateway/apiv1", + "description": "API Gateway API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apigeeconnect/apiv1": { + "api_shortname": "apigeeconnect", + "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", + "description": "Apigee Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apigeeregistry/apiv1": { + "api_shortname": "apigeeregistry", + "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", + "description": "Apigee Registry API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apikeys/apiv2": { + "api_shortname": "apikeys", + "distribution_name": "cloud.google.com/go/apikeys/apiv2", + "description": "API Keys API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/appengine/apiv1": { + "api_shortname": "appengine", + "distribution_name": "cloud.google.com/go/appengine/apiv1", + "description": "App Engine Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apphub/apiv1": { + "api_shortname": "apphub", + "distribution_name": "cloud.google.com/go/apphub/apiv1", + "description": "App Hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apphub/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apps/events/subscriptions/apiv1": { + "api_shortname": "workspaceevents", + "distribution_name": "cloud.google.com/go/apps/events/subscriptions/apiv1", + "description": "Google Workspace Events API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/events/subscriptions/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apps/meet/apiv2": { + "api_shortname": "meet", + "distribution_name": "cloud.google.com/go/apps/meet/apiv2", + "description": "Google Meet API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + 
"cloud.google.com/go/apps/meet/apiv2beta": { + "api_shortname": "meet", + "distribution_name": "cloud.google.com/go/apps/meet/apiv2beta", + "description": "Google Meet API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/area120/tables/apiv1alpha1": { + "api_shortname": "area120tables", + "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", + "description": "Area120 Tables API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/artifactregistry/apiv1": { + "api_shortname": "artifactregistry", + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", + "description": "Artifact Registry API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/artifactregistry/apiv1beta2": { + "api_shortname": "artifactregistry", + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", + "description": "Artifact Registry API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/asset/apiv1": { + "api_shortname": "cloudasset", + "distribution_name": "cloud.google.com/go/asset/apiv1", + "description": "Cloud Asset API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/asset/apiv1p2beta1": { + "api_shortname": "cloudasset", + "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", + "description": "Cloud Asset API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/asset/apiv1p5beta1": { + "api_shortname": "cloudasset", + "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", + "description": "Cloud Asset API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/assuredworkloads/apiv1": { + "api_shortname": "assuredworkloads", + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", + "description": "Assured Workloads API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/assuredworkloads/apiv1beta1": { + "api_shortname": 
"assuredworkloads", + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", + "description": "Assured Workloads API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/automl/apiv1": { + "api_shortname": "automl", + "distribution_name": "cloud.google.com/go/automl/apiv1", + "description": "Cloud AutoML API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/automl/apiv1beta1": { + "api_shortname": "automl", + "distribution_name": "cloud.google.com/go/automl/apiv1beta1", + "description": "Cloud AutoML API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/backupdr/apiv1": { + "api_shortname": "backupdr", + "distribution_name": "cloud.google.com/go/backupdr/apiv1", + "description": "Backup and DR Service API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/baremetalsolution/apiv2": { + "api_shortname": "baremetalsolution", + "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", + "description": "Bare Metal Solution API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/batch/apiv1": { + "api_shortname": "batch", + "distribution_name": "cloud.google.com/go/batch/apiv1", + "description": "Batch API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnections/apiv1": { + "api_shortname": "beyondcorp", + "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", + "description": "BeyondCorp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + "api_shortname": "beyondcorp", + "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", + "description": "BeyondCorp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "api_shortname": "beyondcorp", + "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", + 
"description": "BeyondCorp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "api_shortname": "beyondcorp", + "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", + "description": "BeyondCorp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "api_shortname": "beyondcorp", + "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", + "description": "BeyondCorp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery": { + "api_shortname": "bigquery", + "distribution_name": "cloud.google.com/go/bigquery", + "description": "BigQuery", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "api_shortname": "analyticshub", + "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", + "description": "Analytics Hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/connection/apiv1": { + "api_shortname": "bigqueryconnection", + "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", + "description": "BigQuery Connection API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/connection/apiv1beta1": { + "api_shortname": "bigqueryconnection", + "distribution_name": 
"cloud.google.com/go/bigquery/connection/apiv1beta1", + "description": "BigQuery Connection API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { + "api_shortname": "analyticshub", + "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", + "description": "Analytics Hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datapolicies/apiv1": { + "api_shortname": "bigquerydatapolicy", + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1", + "description": "BigQuery Data Policy API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "api_shortname": "bigquerydatapolicy", + "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", + "description": "BigQuery Data Policy API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/datatransfer/apiv1": { + "api_shortname": "bigquerydatatransfer", + "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", + "description": "BigQuery Data Transfer API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/migration/apiv2": { + "api_shortname": "bigquerymigration", + "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", + "description": "BigQuery Migration API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/migration/apiv2alpha": { + "api_shortname": "bigquerymigration", + "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", + "description": "BigQuery Migration API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/reservation/apiv1": { + "api_shortname": "bigqueryreservation", + "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", + "description": "BigQuery Reservation API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", 
+ "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/storage/apiv1": { + "api_shortname": "bigquerystorage", + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", + "description": "BigQuery Storage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta1": { + "api_shortname": "bigquerystorage", + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", + "description": "BigQuery Storage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta2": { + "api_shortname": "bigquerystorage", + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", + "description": "BigQuery Storage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigtable": { + "api_shortname": "bigtable", + "distribution_name": "cloud.google.com/go/bigtable", + "description": "Cloud BigTable", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/bigtable/admin/apiv2": { + "api_shortname": "bigtableadmin", + "distribution_name": "cloud.google.com/go/bigtable/admin/apiv2", + "description": "Cloud Bigtable Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/admin/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigtable/apiv2": { + "api_shortname": "bigtable", + "distribution_name": "cloud.google.com/go/bigtable/apiv2", + "description": "Cloud Bigtable API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/billing/apiv1": { + "api_shortname": "cloudbilling", + "distribution_name": "cloud.google.com/go/billing/apiv1", + "description": "Cloud Billing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/billing/budgets/apiv1": { + "api_shortname": "billingbudgets", + "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", + "description": "Cloud Billing Budget API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + 
"cloud.google.com/go/billing/budgets/apiv1beta1": { + "api_shortname": "billingbudgets", + "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", + "description": "Cloud Billing Budget API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/binaryauthorization/apiv1": { + "api_shortname": "binaryauthorization", + "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", + "description": "Binary Authorization API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/binaryauthorization/apiv1beta1": { + "api_shortname": "binaryauthorization", + "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", + "description": "Binary Authorization API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/certificatemanager/apiv1": { + "api_shortname": "certificatemanager", + "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", + "description": "Certificate Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/channel/apiv1": { + "api_shortname": "cloudchannel", + "distribution_name": "cloud.google.com/go/channel/apiv1", + "description": "Cloud Channel API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/chat/apiv1": { + "api_shortname": "chat", + "distribution_name": "cloud.google.com/go/chat/apiv1", + "description": "Google Chat API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/chat/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudbuild/apiv1/v2": { + "api_shortname": "cloudbuild", + "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", + "description": "Cloud Build API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudbuild/apiv2": { + "api_shortname": "cloudbuild", + "distribution_name": "cloud.google.com/go/cloudbuild/apiv2", + "description": "Cloud Build API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudcontrolspartner/apiv1": { + 
"api_shortname": "cloudcontrolspartner", + "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1", + "description": "Cloud Controls Partner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudcontrolspartner/apiv1beta": { + "api_shortname": "cloudcontrolspartner", + "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1beta", + "description": "Cloud Controls Partner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/clouddms/apiv1": { + "api_shortname": "datamigration", + "distribution_name": "cloud.google.com/go/clouddms/apiv1", + "description": "Database Migration API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudprofiler/apiv2": { + "api_shortname": "cloudprofiler", + "distribution_name": "cloud.google.com/go/cloudprofiler/apiv2", + "description": "Cloud Profiler API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudprofiler/latest/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudquotas/apiv1": { + "api_shortname": "cloudquotas", + "distribution_name": "cloud.google.com/go/cloudquotas/apiv1", + "description": "Cloud Quotas API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudtasks/apiv2": { + "api_shortname": "cloudtasks", + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", + "description": "Cloud Tasks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudtasks/apiv2beta2": { + "api_shortname": "cloudtasks", + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", + "description": "Cloud Tasks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/cloudtasks/apiv2beta3": { + "api_shortname": "cloudtasks", + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", + "description": "Cloud Tasks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/commerce/consumer/procurement/apiv1": { + "api_shortname": "cloudcommerceconsumerprocurement", 
+ "distribution_name": "cloud.google.com/go/commerce/consumer/procurement/apiv1", + "description": "Cloud Commerce Consumer Procurement API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/compute/apiv1": { + "api_shortname": "compute", + "distribution_name": "cloud.google.com/go/compute/apiv1", + "description": "Google Compute Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/compute/metadata": { + "api_shortname": "compute-metadata", + "distribution_name": "cloud.google.com/go/compute/metadata", + "description": "Service Metadata API", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", + "release_level": "stable", + "library_type": "CORE" + }, + "cloud.google.com/go/confidentialcomputing/apiv1": { + "api_shortname": "confidentialcomputing", + "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1", + "description": "Confidential Computing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/confidentialcomputing/apiv1alpha1": { + "api_shortname": "confidentialcomputing", + "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1alpha1", + "description": "Confidential Computing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/config/apiv1": { + "api_shortname": "config", + "distribution_name": "cloud.google.com/go/config/apiv1", + "description": "Infrastructure Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/contactcenterinsights/apiv1": { + "api_shortname": "contactcenterinsights", + "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", + "description": "Contact Center AI Insights API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/container/apiv1": { + "api_shortname": "container", + "distribution_name": "cloud.google.com/go/container/apiv1", + "description": "Kubernetes Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/containeranalysis/apiv1beta1": { + 
"api_shortname": "containeranalysis", + "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", + "description": "Container Analysis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datacatalog/apiv1": { + "api_shortname": "datacatalog", + "distribution_name": "cloud.google.com/go/datacatalog/apiv1", + "description": "Google Cloud Data Catalog API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datacatalog/apiv1beta1": { + "api_shortname": "datacatalog", + "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", + "description": "Google Cloud Data Catalog API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datacatalog/lineage/apiv1": { + "api_shortname": "datalineage", + "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", + "description": "Data Lineage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataflow/apiv1beta3": { + "api_shortname": "dataflow", + "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", + "description": "Dataflow API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1alpha2": { + "api_shortname": "dataform", + "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", + "description": "Dataform API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataform/apiv1beta1": { + "api_shortname": "dataform", + "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", + "description": "Dataform API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datafusion/apiv1": { + "api_shortname": "datafusion", + "distribution_name": "cloud.google.com/go/datafusion/apiv1", + "description": "Cloud Data Fusion API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datalabeling/apiv1beta1": { + "api_shortname": "datalabeling", + "distribution_name": 
"cloud.google.com/go/datalabeling/apiv1beta1", + "description": "Data Labeling API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataplex/apiv1": { + "api_shortname": "dataplex", + "distribution_name": "cloud.google.com/go/dataplex/apiv1", + "description": "Cloud Dataplex API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataproc/v2/apiv1": { + "api_shortname": "dataproc", + "distribution_name": "cloud.google.com/go/dataproc/v2/apiv1", + "description": "Cloud Dataproc API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/v2/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dataqna/apiv1alpha": { + "api_shortname": "dataqna", + "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", + "description": "Data QnA API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datastore": { + "api_shortname": "datastore", + "distribution_name": "cloud.google.com/go/datastore", + "description": "Cloud Datastore", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/datastore/admin/apiv1": { + "api_shortname": "datastore", + "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", + "description": "Cloud Datastore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datastream/apiv1": { + "api_shortname": "datastream", + "distribution_name": "cloud.google.com/go/datastream/apiv1", + "description": "Datastream API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/datastream/apiv1alpha1": { + "api_shortname": "datastream", + "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", + "description": "Datastream API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/debugger/apiv2": { + "api_shortname": "clouddebugger", + "distribution_name": "cloud.google.com/go/debugger/apiv2", + "description": "Stackdriver Debugger API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/deploy/apiv1": { + "api_shortname": "clouddeploy", + "distribution_name": "cloud.google.com/go/deploy/apiv1", + "description": "Cloud Deploy API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/developerconnect/apiv1": { + "api_shortname": "developerconnect", + "distribution_name": "cloud.google.com/go/developerconnect/apiv1", + "description": "Developer Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/apiv2": { + "api_shortname": "dialogflow", + "distribution_name": "cloud.google.com/go/dialogflow/apiv2", + "description": "Dialogflow API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/apiv2beta1": { + "api_shortname": "dialogflow", + "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", + "description": "Dialogflow API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/cx/apiv3": { + "api_shortname": "dialogflow", + "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", + "description": "Dialogflow API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dialogflow/cx/apiv3beta1": { + "api_shortname": "dialogflow", + "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", + "description": "Dialogflow API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/discoveryengine/apiv1": { + "api_shortname": "discoveryengine", + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1", + "description": "Discovery Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/discoveryengine/apiv1alpha": { + "api_shortname": "discoveryengine", + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1alpha", + "description": "Discovery Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1alpha", + 
"release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/discoveryengine/apiv1beta": { + "api_shortname": "discoveryengine", + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta", + "description": "Discovery Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/dlp/apiv2": { + "api_shortname": "dlp", + "distribution_name": "cloud.google.com/go/dlp/apiv2", + "description": "Sensitive Data Protection (DLP)", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/documentai/apiv1": { + "api_shortname": "documentai", + "distribution_name": "cloud.google.com/go/documentai/apiv1", + "description": "Cloud Document AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/documentai/apiv1beta3": { + "api_shortname": "documentai", + "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", + "description": "Cloud Document AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/domains/apiv1beta1": { + "api_shortname": "domains", + "distribution_name": "cloud.google.com/go/domains/apiv1beta1", + "description": "Cloud Domains API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/edgecontainer/apiv1": { + "api_shortname": "edgecontainer", + "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", + "description": "Distributed Cloud Edge Container API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/edgenetwork/apiv1": { + "api_shortname": "edgenetwork", + "distribution_name": "cloud.google.com/go/edgenetwork/apiv1", + "description": "Distributed Cloud Edge Network API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/errorreporting": { + "api_shortname": "clouderrorreporting", + "distribution_name": "cloud.google.com/go/errorreporting", + "description": "Cloud Error Reporting API", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", + "release_level": "preview", + "library_type": "GAPIC_MANUAL" + }, + 
"cloud.google.com/go/errorreporting/apiv1beta1": { + "api_shortname": "clouderrorreporting", + "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", + "description": "Error Reporting API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/essentialcontacts/apiv1": { + "api_shortname": "essentialcontacts", + "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", + "description": "Essential Contacts API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/eventarc/apiv1": { + "api_shortname": "eventarc", + "distribution_name": "cloud.google.com/go/eventarc/apiv1", + "description": "Eventarc API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/eventarc/publishing/apiv1": { + "api_shortname": "eventarcpublishing", + "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", + "description": "Eventarc Publishing API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/filestore/apiv1": { + "api_shortname": "file", + "distribution_name": "cloud.google.com/go/filestore/apiv1", + "description": "Cloud Filestore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/firestore": { + "api_shortname": "firestore", + "distribution_name": "cloud.google.com/go/firestore", + "description": "Cloud Firestore API", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/firestore/apiv1": { + "api_shortname": "firestore", + "distribution_name": "cloud.google.com/go/firestore/apiv1", + "description": "Cloud Firestore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/firestore/apiv1/admin": { + "api_shortname": "firestore", + "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", + "description": "Cloud Firestore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv1": { + "api_shortname": "cloudfunctions", + "distribution_name": 
"cloud.google.com/go/functions/apiv1", + "description": "Cloud Functions API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2": { + "api_shortname": "cloudfunctions", + "distribution_name": "cloud.google.com/go/functions/apiv2", + "description": "Cloud Functions API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2beta": { + "api_shortname": "cloudfunctions", + "distribution_name": "cloud.google.com/go/functions/apiv2beta", + "description": "Cloud Functions API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/metadata": { + "api_shortname": "firestore-metadata", + "distribution_name": "cloud.google.com/go/functions/metadata", + "description": "Cloud Functions", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", + "release_level": "preview", + "library_type": "CORE" + }, + "cloud.google.com/go/gkebackup/apiv1": { + "api_shortname": "gkebackup", + "distribution_name": "cloud.google.com/go/gkebackup/apiv1", + "description": "Backup for GKE API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { + "api_shortname": "connectgateway", + "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", + "description": "Connect Gateway API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkehub/apiv1beta1": { + "api_shortname": "gkehub", + "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", + "description": "GKE Hub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gkemulticloud/apiv1": { + "api_shortname": "gkemulticloud", + "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", + "description": "Anthos Multi-Cloud API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/gsuiteaddons/apiv1": { + "api_shortname": "gsuiteaddons", + "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", + "description": "Google Workspace Add-ons API", + "language": "go", + "client_library_type": 
"generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iam": { + "api_shortname": "iam", + "distribution_name": "cloud.google.com/go/iam", + "description": "Cloud IAM", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", + "release_level": "stable", + "library_type": "CORE" + }, + "cloud.google.com/go/iam/apiv1": { + "api_shortname": "iam-meta-api", + "distribution_name": "cloud.google.com/go/iam/apiv1", + "description": "IAM Meta API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iam/apiv2": { + "api_shortname": "iam", + "distribution_name": "cloud.google.com/go/iam/apiv2", + "description": "Identity and Access Management (IAM) API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iam/credentials/apiv1": { + "api_shortname": "iamcredentials", + "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", + "description": "IAM Service Account Credentials API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iap/apiv1": { + "api_shortname": "iap", + "distribution_name": "cloud.google.com/go/iap/apiv1", + "description": "Cloud Identity-Aware Proxy API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/identitytoolkit/apiv2": { + "api_shortname": "identitytoolkit", + "distribution_name": "cloud.google.com/go/identitytoolkit/apiv2", + "description": "Identity Toolkit API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/identitytoolkit/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ids/apiv1": { + "api_shortname": "ids", + "distribution_name": "cloud.google.com/go/ids/apiv1", + "description": "Cloud IDS API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/iot/apiv1": { + "api_shortname": "cloudiot", + "distribution_name": "cloud.google.com/go/iot/apiv1", + "description": "Cloud IoT API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/kms/apiv1": { + "api_shortname": "cloudkms", + "distribution_name": "cloud.google.com/go/kms/apiv1", + 
"description": "Cloud Key Management Service (KMS) API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/kms/inventory/apiv1": { + "api_shortname": "kmsinventory", + "distribution_name": "cloud.google.com/go/kms/inventory/apiv1", + "description": "KMS Inventory API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/inventory/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/language/apiv1": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv1", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/language/apiv1beta2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv1beta2", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/language/apiv2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv2", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/lifesciences/apiv2beta": { + "api_shortname": "lifesciences", + "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", + "description": "Cloud Life Sciences API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/logging": { + "api_shortname": "logging", + "distribution_name": "cloud.google.com/go/logging", + "description": "Cloud Logging API", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/logging/apiv2": { + "api_shortname": "logging", + "distribution_name": "cloud.google.com/go/logging/apiv2", + "description": "Cloud Logging API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/longrunning/autogen": { + "api_shortname": "longrunning", + "distribution_name": "cloud.google.com/go/longrunning/autogen", + "description": "Long Running Operations API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/managedidentities/apiv1": { + "api_shortname": "managedidentities", + "distribution_name": "cloud.google.com/go/managedidentities/apiv1", + "description": "Managed Service for Microsoft Active Directory API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/managedkafka/apiv1": { + "api_shortname": "managedkafka", + "distribution_name": "cloud.google.com/go/managedkafka/apiv1", + "description": "Apache Kafka for BigQuery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/addressvalidation/apiv1": { + "api_shortname": "addressvalidation", + "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", + "description": "Address Validation API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", + "description": "Local Rides and Deliveries API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/delivery/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1", + "description": "Last Mile Fleet Solution Delivery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/places/apiv1": { + "api_shortname": "places", + "distribution_name": "cloud.google.com/go/maps/places/apiv1", + "description": "Places API (New)", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routeoptimization/apiv1": { + "api_shortname": "routeoptimization", + "distribution_name": "cloud.google.com/go/maps/routeoptimization/apiv1", + "description": "Route Optimization API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routeoptimization/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/routing/apiv2": { + "api_shortname": "routes", + "distribution_name": "cloud.google.com/go/maps/routing/apiv2", + "description": "Routes API", + "language": "go", + 
"client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/solar/apiv1": { + "api_shortname": "solar", + "distribution_name": "cloud.google.com/go/maps/solar/apiv1", + "description": "Solar API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/mediatranslation/apiv1beta1": { + "api_shortname": "mediatranslation", + "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", + "description": "Media Translation API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/memcache/apiv1": { + "api_shortname": "memcache", + "distribution_name": "cloud.google.com/go/memcache/apiv1", + "description": "Cloud Memorystore for Memcached API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/memcache/apiv1beta2": { + "api_shortname": "memcache", + "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", + "description": "Cloud Memorystore for Memcached API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/metastore/apiv1": { + "api_shortname": "metastore", + "distribution_name": "cloud.google.com/go/metastore/apiv1", + "description": "Dataproc Metastore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/metastore/apiv1alpha": { + "api_shortname": "metastore", + "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", + "description": "Dataproc Metastore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/metastore/apiv1beta": { + "api_shortname": "metastore", + "distribution_name": "cloud.google.com/go/metastore/apiv1beta", + "description": "Dataproc Metastore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/migrationcenter/apiv1": { + "api_shortname": "migrationcenter", + "distribution_name": "cloud.google.com/go/migrationcenter/apiv1", + "description": "Migration Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/monitoring/apiv3/v2": { + "api_shortname": "monitoring", + "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", + "description": "Cloud Monitoring API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/monitoring/dashboard/apiv1": { + "api_shortname": "monitoring", + "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", + "description": "Cloud Monitoring API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/monitoring/metricsscope/apiv1": { + "api_shortname": "monitoring", + "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", + "description": "Cloud Monitoring API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/netapp/apiv1": { + "api_shortname": "netapp", + "distribution_name": "cloud.google.com/go/netapp/apiv1", + "description": "NetApp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/networkconnectivity/apiv1": { + "api_shortname": "networkconnectivity", + "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", + "description": "Network Connectivity API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/networkconnectivity/apiv1alpha1": { + "api_shortname": "networkconnectivity", + "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", + "description": "Network Connectivity API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/networkmanagement/apiv1": { + "api_shortname": "networkmanagement", + "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", + "description": "Network Management API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/networksecurity/apiv1beta1": { + "api_shortname": "networksecurity", + "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", + "description": "Network Security API", + "language": "go", + "client_library_type": "generated", + 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/networkservices/apiv1": { + "api_shortname": "networkservices", + "distribution_name": "cloud.google.com/go/networkservices/apiv1", + "description": "Network Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv1": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv1", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv1beta1": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv2": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv2", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/optimization/apiv1": { + "api_shortname": "cloudoptimization", + "distribution_name": "cloud.google.com/go/optimization/apiv1", + "description": "Cloud Optimization API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/orchestration/airflow/service/apiv1": { + "api_shortname": "composer", + "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", + "description": "Cloud Composer API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/orgpolicy/apiv2": { + "api_shortname": "orgpolicy", + "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", + "description": "Organization Policy API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1": { + "api_shortname": "osconfig", + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", + "description": "OS Config API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { + "api_shortname": "osconfig", + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", + "description": "OS Config API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/osconfig/apiv1": { + "api_shortname": "osconfig", + "distribution_name": "cloud.google.com/go/osconfig/apiv1", + "description": "OS Config API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/osconfig/apiv1alpha": { + "api_shortname": "osconfig", + "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", + "description": "OS Config API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/osconfig/apiv1beta": { + "api_shortname": "osconfig", + "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", + "description": "OS Config API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/oslogin/apiv1": { + "api_shortname": "oslogin", + "distribution_name": "cloud.google.com/go/oslogin/apiv1", + "description": "Cloud OS Login API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/oslogin/apiv1beta": { + "api_shortname": "oslogin", + "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", + "description": "Cloud OS Login API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/parallelstore/apiv1beta": { + "api_shortname": "parallelstore", + "distribution_name": "cloud.google.com/go/parallelstore/apiv1beta", + "description": "Parallelstore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/phishingprotection/apiv1beta1": { + "api_shortname": "phishingprotection", + "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", + "description": "Phishing Protection API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", + 
"release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policysimulator/apiv1": { + "api_shortname": "policysimulator", + "distribution_name": "cloud.google.com/go/policysimulator/apiv1", + "description": "Policy Simulator API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policytroubleshooter/apiv1": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", + "description": "Policy Troubleshooter API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policytroubleshooter/iam/apiv3": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", + "description": "Policy Troubleshooter API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/privatecatalog/apiv1beta1": { + "api_shortname": "cloudprivatecatalog", + "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", + "description": "Cloud Private Catalog API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/privilegedaccessmanager/apiv1": { + "api_shortname": "privilegedaccessmanager", + "distribution_name": "cloud.google.com/go/privilegedaccessmanager/apiv1", + "description": "Privileged Access Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privilegedaccessmanager/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/profiler": { + "api_shortname": "cloudprofiler", + "distribution_name": "cloud.google.com/go/profiler", + "description": "Cloud Profiler", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", + "release_level": "stable", + "library_type": "AGENT" + }, + "cloud.google.com/go/pubsub": { + "api_shortname": "pubsub", + "distribution_name": "cloud.google.com/go/pubsub", + "description": "Cloud PubSub", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/pubsub/apiv1": { + "api_shortname": "pubsub", + "distribution_name": "cloud.google.com/go/pubsub/apiv1", + "description": "Cloud Pub/Sub API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + 
"cloud.google.com/go/pubsublite": { + "api_shortname": "pubsublite", + "distribution_name": "cloud.google.com/go/pubsublite", + "description": "Cloud PubSub Lite", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/pubsublite/apiv1": { + "api_shortname": "pubsublite", + "distribution_name": "cloud.google.com/go/pubsublite/apiv1", + "description": "Pub/Sub Lite API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/rapidmigrationassessment/apiv1": { + "api_shortname": "rapidmigrationassessment", + "distribution_name": "cloud.google.com/go/rapidmigrationassessment/apiv1", + "description": "Rapid Migration Assessment API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/rapidmigrationassessment/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "api_shortname": "recaptchaenterprise", + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", + "description": "reCAPTCHA Enterprise API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "api_shortname": "recaptchaenterprise", + "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", + "description": "reCAPTCHA Enterprise API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/recommendationengine/apiv1beta1": { + "api_shortname": "recommendationengine", + "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", + "description": "Recommendations AI", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/recommender/apiv1": { + "api_shortname": "recommender", + "distribution_name": "cloud.google.com/go/recommender/apiv1", + "description": "Recommender API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/recommender/apiv1beta1": { + "api_shortname": "recommender", + "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", + "description": "Recommender API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", + "release_level": "preview", + 
"library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/redis/apiv1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/redis/apiv1beta1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/apiv1beta1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/redis/cluster/apiv1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/cluster/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/resourcemanager/apiv2": { + "api_shortname": "cloudresourcemanager", + "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", + "description": "Cloud Resource Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/resourcemanager/apiv3": { + "api_shortname": "cloudresourcemanager", + "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", + "description": "Cloud Resource Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/resourcesettings/apiv1": { + "api_shortname": "resourcesettings", + "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", + "description": "Resource Settings API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2": { + "api_shortname": "retail", + "distribution_name": "cloud.google.com/go/retail/apiv2", + "description": "Vertex AI Search for Retail API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/retail/apiv2alpha": { + "api_shortname": "retail", + "distribution_name": "cloud.google.com/go/retail/apiv2alpha", + "description": "Vertex AI Search for Retail API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + 
"cloud.google.com/go/retail/apiv2beta": { + "api_shortname": "retail", + "distribution_name": "cloud.google.com/go/retail/apiv2beta", + "description": "Vertex AI Search for Retail API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/rpcreplay": { + "api_shortname": "rpcreplay", + "distribution_name": "cloud.google.com/go/rpcreplay", + "description": "RPC Replay", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", + "release_level": "stable", + "library_type": "OTHER" + }, + "cloud.google.com/go/run/apiv2": { + "api_shortname": "run", + "distribution_name": "cloud.google.com/go/run/apiv2", + "description": "Cloud Run Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/scheduler/apiv1": { + "api_shortname": "cloudscheduler", + "distribution_name": "cloud.google.com/go/scheduler/apiv1", + "description": "Cloud Scheduler API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/scheduler/apiv1beta1": { + "api_shortname": "cloudscheduler", + "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", + "description": "Cloud Scheduler API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/secretmanager/apiv1": { + "api_shortname": "secretmanager", + "distribution_name": "cloud.google.com/go/secretmanager/apiv1", + "description": "Secret Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/secretmanager/apiv1beta2": { + "api_shortname": "secretmanager", + "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta2", + "description": "Secret Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securesourcemanager/apiv1": { + "api_shortname": "securesourcemanager", + "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1", + "description": "Secure Source Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/security/privateca/apiv1": { + "api_shortname": "privateca", + "distribution_name": "cloud.google.com/go/security/privateca/apiv1", + 
"description": "Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/security/publicca/apiv1": { + "api_shortname": "publicca", + "distribution_name": "cloud.google.com/go/security/publicca/apiv1", + "description": "Public Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/security/publicca/apiv1beta1": { + "api_shortname": "publicca", + "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", + "description": "Public Certificate Authority API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securitycenter/apiv1": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/apiv1", + "description": "Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securitycenter/apiv1beta1": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", + "description": "Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securitycenter/apiv1p1beta1": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", + "description": "Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securitycenter/apiv2": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/apiv2", + "description": "Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securitycenter/settings/apiv1beta1": { + "api_shortname": "securitycenter", + "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", + "description": "Cloud Security Command Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + 
"cloud.google.com/go/securitycentermanagement/apiv1": { + "api_shortname": "securitycentermanagement", + "distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1", + "description": "Security Center Management API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/securityposture/apiv1": { + "api_shortname": "securityposture", + "distribution_name": "cloud.google.com/go/securityposture/apiv1", + "description": "Security Posture API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securityposture/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/servicecontrol/apiv1": { + "api_shortname": "servicecontrol", + "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", + "description": "Service Control API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/servicedirectory/apiv1": { + "api_shortname": "servicedirectory", + "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", + "description": "Service Directory API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/servicedirectory/apiv1beta1": { + "api_shortname": "servicedirectory", + "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", + "description": "Service Directory API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/servicehealth/apiv1": { + "api_shortname": "servicehealth", + "distribution_name": "cloud.google.com/go/servicehealth/apiv1", + "description": "Service Health API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/servicemanagement/apiv1": { + "api_shortname": "servicemanagement", + "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", + "description": "Service Management API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/serviceusage/apiv1": { + "api_shortname": "serviceusage", + "distribution_name": "cloud.google.com/go/serviceusage/apiv1", + "description": "Service Usage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", + "release_level": "stable", + "library_type": 
"GAPIC_AUTO" + }, + "cloud.google.com/go/shell/apiv1": { + "api_shortname": "cloudshell", + "distribution_name": "cloud.google.com/go/shell/apiv1", + "description": "Cloud Shell API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/css/apiv1": { + "api_shortname": "css", + "distribution_name": "cloud.google.com/go/shopping/css/apiv1", + "description": "CSS API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/css/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/conversions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/lfp/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/lfp/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/lfp/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/notifications/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/notifications/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/notifications/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/products/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/quota/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/reports/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/reports/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reports/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/spanner": { + "api_shortname": "spanner", + "distribution_name": "cloud.google.com/go/spanner", + "description": "Cloud Spanner", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/spanner/admin/database/apiv1": { + "api_shortname": "spanner", + "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", + "description": "Cloud Spanner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/spanner/admin/instance/apiv1": { + "api_shortname": "spanner", + "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", + "description": "Cloud Spanner Instance Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/spanner/apiv1": { + "api_shortname": "spanner", + "distribution_name": "cloud.google.com/go/spanner/apiv1", + "description": "Cloud Spanner API", + "language": 
"go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/spanner/executor/apiv1": { + "api_shortname": "spanner-cloud-executor", + "distribution_name": "cloud.google.com/go/spanner/executor/apiv1", + "description": "Cloud Spanner Executor test API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/executor/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/speech/apiv1": { + "api_shortname": "speech", + "distribution_name": "cloud.google.com/go/speech/apiv1", + "description": "Cloud Speech-to-Text API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/speech/apiv1p1beta1": { + "api_shortname": "speech", + "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", + "description": "Cloud Speech-to-Text API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/speech/apiv2": { + "api_shortname": "speech", + "distribution_name": "cloud.google.com/go/speech/apiv2", + "description": "Cloud Speech-to-Text API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/storage": { + "api_shortname": "storage", + "distribution_name": "cloud.google.com/go/storage", + "description": "Cloud Storage (GCS)", + "language": "go", + "client_library_type": "manual", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", + "release_level": "stable", + "library_type": "GAPIC_MANUAL" + }, + "cloud.google.com/go/storage/control/apiv2": { + "api_shortname": "storage", + "distribution_name": "cloud.google.com/go/storage/control/apiv2", + "description": "Storage Control API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/storage/internal/apiv2": { + "api_shortname": "storage", + "distribution_name": "cloud.google.com/go/storage/internal/apiv2", + "description": "Cloud Storage API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/storageinsights/apiv1": { + "api_shortname": "storageinsights", + "distribution_name": "cloud.google.com/go/storageinsights/apiv1", + "description": "Storage Insights API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/storageinsights/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/storagetransfer/apiv1": { + "api_shortname": "storagetransfer", + "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", + "description": "Storage Transfer API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/streetview/publish/apiv1": { + "api_shortname": "streetviewpublish", + "distribution_name": "cloud.google.com/go/streetview/publish/apiv1", + "description": "Street View Publish API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/streetview/latest/publish/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/support/apiv2": { + "api_shortname": "cloudsupport", + "distribution_name": "cloud.google.com/go/support/apiv2", + "description": "Google Cloud Support API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/support/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/talent/apiv4": { + "api_shortname": "jobs", + "distribution_name": "cloud.google.com/go/talent/apiv4", + "description": "Cloud Talent Solution API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/talent/apiv4beta1": { + "api_shortname": "jobs", + "distribution_name": "cloud.google.com/go/talent/apiv4beta1", + "description": "Cloud Talent Solution API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/telcoautomation/apiv1": { + "api_shortname": "telcoautomation", + "distribution_name": "cloud.google.com/go/telcoautomation/apiv1", + "description": "Telco Automation API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/texttospeech/apiv1": { + "api_shortname": "texttospeech", + "distribution_name": "cloud.google.com/go/texttospeech/apiv1", + "description": "Cloud Text-to-Speech API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/tpu/apiv1": { + "api_shortname": "tpu", + "distribution_name": "cloud.google.com/go/tpu/apiv1", + "description": "Cloud TPU API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" 
+ }, + "cloud.google.com/go/trace/apiv1": { + "api_shortname": "cloudtrace", + "distribution_name": "cloud.google.com/go/trace/apiv1", + "description": "Stackdriver Trace API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/trace/apiv2": { + "api_shortname": "cloudtrace", + "distribution_name": "cloud.google.com/go/trace/apiv2", + "description": "Stackdriver Trace API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/translate/apiv3": { + "api_shortname": "translate", + "distribution_name": "cloud.google.com/go/translate/apiv3", + "description": "Cloud Translation API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/video/livestream/apiv1": { + "api_shortname": "livestream", + "distribution_name": "cloud.google.com/go/video/livestream/apiv1", + "description": "Live Stream API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/video/stitcher/apiv1": { + "api_shortname": "videostitcher", + "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", + "description": "Video Stitcher API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/video/transcoder/apiv1": { + "api_shortname": "transcoder", + "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", + "description": "Transcoder API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/videointelligence/apiv1": { + "api_shortname": "videointelligence", + "distribution_name": "cloud.google.com/go/videointelligence/apiv1", + "description": "Cloud Video Intelligence API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/videointelligence/apiv1beta2": { + "api_shortname": "videointelligence", + "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", + "description": "Google Cloud Video Intelligence API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "api_shortname": 
"videointelligence", + "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", + "description": "Cloud Video Intelligence API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/vision/v2/apiv1": { + "api_shortname": "vision", + "distribution_name": "cloud.google.com/go/vision/v2/apiv1", + "description": "Cloud Vision API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "api_shortname": "vision", + "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", + "description": "Cloud Vision API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/visionai/apiv1": { + "api_shortname": "visionai", + "distribution_name": "cloud.google.com/go/visionai/apiv1", + "description": "Vision AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/visionai/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/vmmigration/apiv1": { + "api_shortname": "vmmigration", + "distribution_name": "cloud.google.com/go/vmmigration/apiv1", + "description": "VM Migration API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/vmwareengine/apiv1": { + "api_shortname": "vmwareengine", + "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", + "description": "VMware Engine API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/vpcaccess/apiv1": { + "api_shortname": "vpcaccess", + "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", + "description": "Serverless VPC Access API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/webrisk/apiv1": { + "api_shortname": "webrisk", + "distribution_name": "cloud.google.com/go/webrisk/apiv1", + "description": "Web Risk API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/webrisk/apiv1beta1": { + "api_shortname": "webrisk", + "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", + "description": "Web Risk API", + "language": "go", + "client_library_type": "generated", + 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/websecurityscanner/apiv1": { + "api_shortname": "websecurityscanner", + "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", + "description": "Web Security Scanner API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/apiv1": { + "api_shortname": "workflows", + "distribution_name": "cloud.google.com/go/workflows/apiv1", + "description": "Workflows API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/apiv1beta": { + "api_shortname": "workflows", + "distribution_name": "cloud.google.com/go/workflows/apiv1beta", + "description": "Workflows API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/executions/apiv1": { + "api_shortname": "workflowexecutions", + "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", + "description": "Workflow Executions API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workflows/executions/apiv1beta": { + "api_shortname": "workflowexecutions", + "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", + "description": "Workflow Executions API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workstations/apiv1": { + "api_shortname": "workstations", + "distribution_name": "cloud.google.com/go/workstations/apiv1", + "description": "Cloud Workstations API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/workstations/apiv1beta": { + "api_shortname": "workstations", + "distribution_name": "cloud.google.com/go/workstations/apiv1beta", + "description": "Cloud Workstations API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + } +} diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md new file mode 100644 index 000000000..972857e9d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/README.md @@ -0,0 +1,29 @@ +# Internal + +This directory 
contains internal code for cloud.google.com/go packages. + +## .repo-metadata-full.json + +`.repo-metadata-full.json` contains metadata about the packages in this repo. It +is generated by `internal/gapicgen/generator`. It's processed by external tools +to build lists of all of the packages. + +Don't make breaking changes to the format without consulting with the external +tools. + +One day, we may want to create individual `.repo-metadata.json` files next to +each package, which is the pattern followed by some other languages. External +tools would then talk to pkg.go.dev or some other service to get the overall +list of packages and use the `.repo-metadata.json` files to get the additional +metadata required. For now, `.repo-metadata-full.json` includes everything. + +### Updating OwlBot SHA + +You may want to manually update the which version of the post-processor will be +used -- to do this you need to update the SHA in the OwlBot lock file. + +See the [postprocessor/README](postprocessor/README.md) for detailed +instructions. + +*Note*: OwlBot will eventually open a pull request to update this value if it +discovers a new version of the container. diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 000000000..30d7bcf77 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,55 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. +func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh new file mode 100644 index 000000000..59c190653 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/gen_info.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Script to generate info.go files with methods for all clients. 
+ +if [[ $# != 2 ]]; then + echo >&2 "usage: $0 DIR PACKAGE" + exit 1 +fi + +outfile=info.go + +cd $1 + +cat <<'EOF' > $outfile +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. + +EOF + +echo -e >> $outfile "package $2\n" + + +awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ { + printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3); + printf(" c.setGoogleClientInfo(keyval...)\n"); + printf("}\n\n"); +}' *_client.go >> $outfile + +gofmt -w $outfile diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go new file mode 100644 index 000000000..72780f764 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional.go @@ -0,0 +1,108 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package optional provides versions of primitive types that can +// be nil. These are useful in methods that update some of an API object's +// fields. +package optional + +import ( + "fmt" + "strings" + "time" +) + +type ( + // Bool is either a bool or nil. + Bool interface{} + + // String is either a string or nil. + String interface{} + + // Int is either an int or nil. + Int interface{} + + // Uint is either a uint or nil. + Uint interface{} + + // Float64 is either a float64 or nil. + Float64 interface{} + + // Duration is either a time.Duration or nil. + Duration interface{} +) + +// ToBool returns its argument as a bool. +// It panics if its argument is nil or not a bool. +func ToBool(v Bool) bool { + x, ok := v.(bool) + if !ok { + doPanic("Bool", v) + } + return x +} + +// ToString returns its argument as a string. +// It panics if its argument is nil or not a string. +func ToString(v String) string { + x, ok := v.(string) + if !ok { + doPanic("String", v) + } + return x +} + +// ToInt returns its argument as an int. +// It panics if its argument is nil or not an int. +func ToInt(v Int) int { + x, ok := v.(int) + if !ok { + doPanic("Int", v) + } + return x +} + +// ToUint returns its argument as a uint. +// It panics if its argument is nil or not a uint. 
+func ToUint(v Uint) uint { + x, ok := v.(uint) + if !ok { + doPanic("Uint", v) + } + return x +} + +// ToFloat64 returns its argument as a float64. +// It panics if its argument is nil or not a float64. +func ToFloat64(v Float64) float64 { + x, ok := v.(float64) + if !ok { + doPanic("Float64", v) + } + return x +} + +// ToDuration returns its argument as a time.Duration. +// It panics if its argument is nil or not a time.Duration. +func ToDuration(v Duration) time.Duration { + x, ok := v.(time.Duration) + if !ok { + doPanic("Duration", v) + } + return x +} + +func doPanic(capType string, v interface{}) { + panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) +} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 000000000..4c9220e0d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "fmt" + "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if ctxErr := sleep(ctx, p); ctxErr != nil { + if lastErr != nil { + return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr} + } + return ctxErr + } + } +} + +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. 
+func (e wrappedCallErr) Is(err error) bool { + return e.ctxErr == err || e.wrappedErr == err +} diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go new file mode 100644 index 000000000..e8daf800a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -0,0 +1,275 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "sync" + + "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + ottrace "go.opentelemetry.io/otel/trace" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/status" +) + +const ( + // Deprecated: The default experimental tracing support for OpenCensus is + // now deprecated in the Google Cloud client libraries for Go. + // TelemetryPlatformTracingOpenCensus is the value to which the environment + // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be + // set to enable OpenCensus tracing. + TelemetryPlatformTracingOpenCensus = "opencensus" + // TelemetryPlatformTracingOpenTelemetry is the value to which the environment + // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be + // set to enable OpenTelemetry tracing. + TelemetryPlatformTracingOpenTelemetry = "opentelemetry" + // TelemetryPlatformTracingVar is the name of the environment + // variable that can be set to change the default tracing from OpenTelemetry + // to OpenCensus. + // + // The default experimental tracing support for OpenCensus is now deprecated + // in the Google Cloud client libraries for Go. + TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING" + // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer + // when it is obtained from the OpenTelemetry TracerProvider. + OpenTelemetryTracerName = "cloud.google.com/go" +) + +var ( + // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field + openCensusTracingEnabledMu = sync.RWMutex{} + // openCensusTracingEnabled is true if the environment variable + // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the + // case-insensitive value "opencensus". + openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace( + os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus) +) + +// SetOpenTelemetryTracingEnabledField programmatically sets the value provided +// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of +// unit testing. Do not invoke it directly. Intended for use only in unit tests. +// Restore original value after each test. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. 
+func SetOpenTelemetryTracingEnabledField(enabled bool) { + openCensusTracingEnabledMu.Lock() + defer openCensusTracingEnabledMu.Unlock() + openCensusTracingEnabled = !enabled +} + +// Deprecated: The default experimental tracing support for OpenCensus is now +// deprecated in the Google Cloud client libraries for Go. +// +// IsOpenCensusTracingEnabled returns true if the environment variable +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the +// case-insensitive value "opencensus". +func IsOpenCensusTracingEnabled() bool { + openCensusTracingEnabledMu.RLock() + defer openCensusTracingEnabledMu.RUnlock() + return openCensusTracingEnabled +} + +// IsOpenTelemetryTracingEnabled returns true if the environment variable +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the +// case-insensitive value "opencensus". +func IsOpenTelemetryTracingEnabled() bool { + return !IsOpenCensusTracingEnabled() +} + +// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled +// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled +// returns true, the span will be an OpenTelemetry span. Set the environment variable +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to OpenTelemetry. Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. +func StartSpan(ctx context.Context, name string) context.Context { + if IsOpenTelemetryTracingEnabled() { + ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name) + } else { + ctx, _ = trace.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled +// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled +// returns true, the span will be an OpenTelemetry span. Set the environment variable +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to OpenTelemetry. Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. +func EndSpan(ctx context.Context, err error) { + if IsOpenTelemetryTracingEnabled() { + span := ottrace.SpanFromContext(ctx) + if err != nil { + span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err)) + span.RecordError(err) + } + span.End() + } else { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(toStatus(err)) + } + span.End() + } +} + +// toStatus converts an error to an equivalent OpenCensus status.
+func toStatus(err error) trace.Status { + var err2 *googleapi.Error + if ok := errors.As(err, &err2); ok { + return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} + } else if s, ok := status.FromError(err); ok { + return trace.Status{Code: int32(s.Code()), Message: s.Message()} + } else { + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} + } +} + +// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description. +func toOpenTelemetryStatusDescription(err error) string { + var err2 *googleapi.Error + if ok := errors.As(err, &err2); ok { + return err2.Message + } else if s, ok := status.FromError(err); ok { + return s.Message() + } else { + return err.Error() + } +} + +// TODO(deklerk): switch to using OpenCensus function when it becomes available. +// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto +func httpStatusCodeToOCCode(httpStatusCode int) int32 { + switch httpStatusCode { + case 200: + return int32(code.Code_OK) + case 499: + return int32(code.Code_CANCELLED) + case 500: + return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS + case 400: + return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE + case 504: + return int32(code.Code_DEADLINE_EXCEEDED) + case 404: + return int32(code.Code_NOT_FOUND) + case 409: + return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED + case 403: + return int32(code.Code_PERMISSION_DENIED) + case 401: + return int32(code.Code_UNAUTHENTICATED) + case 429: + return int32(code.Code_RESOURCE_EXHAUSTED) + case 501: + return int32(code.Code_UNIMPLEMENTED) + case 503: + return int32(code.Code_UNAVAILABLE) + default: + return int32(code.Code_UNKNOWN) + } +} + +// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then: +// * calls Span.Annotatef if OpenCensus is enabled; or +// * calls Span.AddEvent if OpenTelemetry is enabled. +// +// If IsOpenCensusTracingEnabled returns true, the expected span must be an +// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected +// span must be an OpenTelemetry span. Set the environment variable +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to OpenTelemetry. Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. +func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + if IsOpenTelemetryTracingEnabled() { + attrs := otAttrs(attrMap) + ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...)) + } else { + attrs := ocAttrs(attrMap) + // TODO: (odeke-em): perhaps just pass around spans due to the cost + // incurred from using trace.FromContext(ctx) yet we could avoid + // throwing away the work done by ctx, span := trace.StartSpan. + trace.FromContext(ctx).Annotatef(attrs, format, args...) + } +} + +// ocAttrs converts a generic map to OpenCensus attributes.
+func ocAttrs(attrMap map[string]interface{}) []trace.Attribute { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + return attrs +} + +// otAttrs converts a generic map to OpenTelemetry attributes. +func otAttrs(attrMap map[string]interface{}) []attribute.KeyValue { + var attrs []attribute.KeyValue + for k, v := range attrMap { + var a attribute.KeyValue + switch v := v.(type) { + case string: + a = attribute.Key(k).String(v) + case bool: + a = attribute.Key(k).Bool(v) + case int: + a = attribute.Key(k).Int(v) + case int64: + a = attribute.Key(k).Int64(v) + default: + a = attribute.Key(k).String(fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + return attrs +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh similarity index 50% rename from vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh rename to vendor/cloud.google.com/go/internal/version/update_version.sh index 113d40cbe..d7c5a3e21 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 gRPC authors. +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,21 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -eux -o pipefail +today=$(date +%Y%m%d) -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 000000000..fd9dd91e9 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. 
+package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20201104" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/cloud.google.com/go/kms/LICENSE b/vendor/cloud.google.com/go/kms/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go new file mode 100644 index 000000000..16440fbc7 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go @@ -0,0 +1,1271 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package kms + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newAutokeyAdminClientHook clientHook + +// AutokeyAdminCallOptions contains the retry settings for each method of AutokeyAdminClient. +type AutokeyAdminCallOptions struct { + UpdateAutokeyConfig []gax.CallOption + GetAutokeyConfig []gax.CallOption + ShowEffectiveAutokeyConfig []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + GetOperation []gax.CallOption +} + +func defaultAutokeyAdminGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAutokeyAdminCallOptions() *AutokeyAdminCallOptions { + return &AutokeyAdminCallOptions{ + UpdateAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ShowEffectiveAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +func defaultAutokeyAdminRESTCallOptions() *AutokeyAdminCallOptions { + return &AutokeyAdminCallOptions{ + UpdateAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + 
return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + ShowEffectiveAutokeyConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +// internalAutokeyAdminClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API. +type internalAutokeyAdminClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + UpdateAutokeyConfig(context.Context, *kmspb.UpdateAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error) + GetAutokeyConfig(context.Context, *kmspb.GetAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error) + ShowEffectiveAutokeyConfig(context.Context, *kmspb.ShowEffectiveAutokeyConfigRequest, ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// AutokeyAdminClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level +// configurations. A configuration is inherited by all descendent projects. A +// configuration at one folder overrides any other configurations in its +// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS +// Autokey, so that users working in a descendant project can request +// provisioned CryptoKeys, ready for Customer +// Managed Encryption Key (CMEK) use, on-demand. +type AutokeyAdminClient struct { + // The internal transport-dependent client. + internalClient internalAutokeyAdminClient + + // The call options for this service. + CallOptions *AutokeyAdminCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
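The default call options above retry UpdateAutokeyConfig, GetAutokeyConfig, and ShowEffectiveAutokeyConfig on Unavailable/DeadlineExceeded (gRPC) or 503/504 (REST), starting at 100ms and backing off toward 60s with a 1.3 multiplier. Callers can override that per call by appending their own gax options. A minimal sketch with a hypothetical folder ID; client construction is shown in a later sketch:

```
// Sketch: override the generated retry policy for one call by passing gax
// options at call time. The folder ID is illustrative.
package kmsexample

import (
	"context"
	"time"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func getConfigWithTightRetry(ctx context.Context, client *kms.AutokeyAdminClient) (*kmspb.AutokeyConfig, error) {
	// Retry only on Unavailable, with a tighter backoff than the generated default.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2.0,
		})
	})
	return client.GetAutokeyConfig(ctx, &kmspb.GetAutokeyConfigRequest{
		Name: "folders/123456789/autokeyConfig", // hypothetical resource name
	}, retry)
}
```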
+func (c *AutokeyAdminClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AutokeyAdminClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *AutokeyAdminClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// UpdateAutokeyConfig updates the AutokeyConfig for a +// folder. The caller must have both cloudkms.autokeyConfigs.update +// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy +// permission on the provided key project. A +// KeyHandle creation in the folder’s +// descendant projects will use this configuration to determine where to +// create the resulting CryptoKey. +func (c *AutokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + return c.internalClient.UpdateAutokeyConfig(ctx, req, opts...) +} + +// GetAutokeyConfig returns the AutokeyConfig for a +// folder. +func (c *AutokeyAdminClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + return c.internalClient.GetAutokeyConfig(ctx, req, opts...) +} + +// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project. +func (c *AutokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) { + return c.internalClient.ShowEffectiveAutokeyConfig(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *AutokeyAdminClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *AutokeyAdminClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *AutokeyAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *AutokeyAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. 
+// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *AutokeyAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *AutokeyAdminClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// autokeyAdminGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type autokeyAdminGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing AutokeyAdminClient + CallOptions **AutokeyAdminCallOptions + + // The gRPC API client. + autokeyAdminClient kmspb.AutokeyAdminClient + + operationsClient longrunningpb.OperationsClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewAutokeyAdminClient creates a new autokey admin client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level +// configurations. A configuration is inherited by all descendent projects. A +// configuration at one folder overrides any other configurations in its +// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS +// Autokey, so that users working in a descendant project can request +// provisioned CryptoKeys, ready for Customer +// Managed Encryption Key (CMEK) use, on-demand. +func NewAutokeyAdminClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) { + clientOpts := defaultAutokeyAdminGRPCClientOptions() + if newAutokeyAdminClientHook != nil { + hookOpts, err := newAutokeyAdminClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := AutokeyAdminClient{CallOptions: defaultAutokeyAdminCallOptions()} + + c := &autokeyAdminGRPCClient{ + connPool: connPool, + autokeyAdminClient: kmspb.NewAutokeyAdminClient(connPool), + CallOptions: &client.CallOptions, + operationsClient: longrunningpb.NewOperationsClient(connPool), + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
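NewAutokeyAdminClient above (and NewAutokeyAdminRESTClient in the following hunk) both hand back the same AutokeyAdminClient wrapper. A minimal usage sketch, assuming application-default credentials are available and that the response's key_project field is what the caller wants; the project ID is illustrative:

```
// Sketch: build an AutokeyAdminClient and look up the effective Autokey
// configuration for a project. The project ID is illustrative.
package main

import (
	"context"
	"fmt"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	ctx := context.Background()
	client, err := kms.NewAutokeyAdminClient(ctx) // or NewAutokeyAdminRESTClient
	if err != nil {
		log.Fatalf("creating AutokeyAdmin client: %v", err)
	}
	defer client.Close()

	resp, err := client.ShowEffectiveAutokeyConfig(ctx, &kmspb.ShowEffectiveAutokeyConfigRequest{
		Parent: "projects/my-resource-project", // hypothetical resource project
	})
	if err != nil {
		log.Fatalf("ShowEffectiveAutokeyConfig: %v", err)
	}
	fmt.Println("effective key project:", resp.GetKeyProject())
}
```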
+func (c *autokeyAdminGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *autokeyAdminGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *autokeyAdminGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type autokeyAdminRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string + + // Points back to the CallOptions field of the containing AutokeyAdminClient + CallOptions **AutokeyAdminCallOptions +} + +// NewAutokeyAdminRESTClient creates a new autokey admin rest client. +// +// Provides interfaces for managing Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level +// configurations. A configuration is inherited by all descendent projects. A +// configuration at one folder overrides any other configurations in its +// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS +// Autokey, so that users working in a descendant project can request +// provisioned CryptoKeys, ready for Customer +// Managed Encryption Key (CMEK) use, on-demand. +func NewAutokeyAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) { + clientOpts := append(defaultAutokeyAdminRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultAutokeyAdminRESTCallOptions() + c := &autokeyAdminRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &AutokeyAdminClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultAutokeyAdminRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"), + internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *autokeyAdminRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *autokeyAdminRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *autokeyAdminRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *autokeyAdminGRPCClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...) + var resp *kmspb.AutokeyConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyAdminClient.UpdateAutokeyConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...) + var resp *kmspb.AutokeyConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyAdminClient.GetAutokeyConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...) + var resp *kmspb.ShowEffectiveAutokeyConfigResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyAdminClient.ShowEffectiveAutokeyConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) 
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *autokeyAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) 
+ var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyAdminGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateAutokeyConfig updates the AutokeyConfig for a +// folder. The caller must have both cloudkms.autokeyConfigs.update +// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy +// permission on the provided key project. A +// KeyHandle creation in the folder’s +// descendant projects will use this configuration to determine where to +// create the resulting CryptoKey. +func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetAutokeyConfig() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetAutokeyConfig().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + field, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) 
+ opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.AutokeyConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetAutokeyConfig returns the AutokeyConfig for a +// folder. +func (c *autokeyAdminRESTClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.AutokeyConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project. +func (c *autokeyAdminRESTClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:showEffectiveAutokeyConfig", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. 
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.ShowEffectiveAutokeyConfigResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetLocation gets information about a location. +func (c *autokeyAdminRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &locationpb.Location{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListLocations lists information about the supported locations for this service. 
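Both transports hand ListLocations results back through a LocationIterator wired up with iterator.NewPageInfo, as in the gRPC implementation above. Consuming it follows the usual google.golang.org/api/iterator idiom; a sketch, assuming the iterator's standard Next method, an already-constructed client, and an illustrative parent resource:

```
// Sketch: page through ListLocations with the standard iterator idiom.
// Assumes an existing *kms.AutokeyAdminClient; the parent name is illustrative.
package kmsexample

import (
	"context"
	"fmt"

	kms "cloud.google.com/go/kms/apiv1"
	"google.golang.org/api/iterator"
	locationpb "google.golang.org/genproto/googleapis/cloud/location"
)

func printLocations(ctx context.Context, client *kms.AutokeyAdminClient) error {
	it := client.ListLocations(ctx, &locationpb.ListLocationsRequest{
		Name: "projects/my-resource-project", // hypothetical parent resource
	})
	for {
		loc, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(loc.GetLocationId())
	}
}
```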
+func (c *autokeyAdminRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *autokeyAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetOptions().GetRequestedPolicyVersion() != 0 { + params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) 
+ hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *autokeyAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. 
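SetIamPolicy, as its comment above notes, replaces whatever policy is already on the resource, so the usual pattern is read-modify-write: fetch the current policy, append a binding, and send the whole thing back (keeping the fetched etag lets the service reject a write that raced with another editor). A sketch against a hypothetical key ring; the role and member are illustrative, and the same pattern applies through any of the generated KMS clients:

```
// Sketch: read-modify-write an IAM policy via the generated IAM methods.
// The key ring name, role, and member are all illustrative.
package kmsexample

import (
	"context"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	kms "cloud.google.com/go/kms/apiv1"
)

func grantEncrypter(ctx context.Context, client *kms.AutokeyAdminClient) error {
	resource := "projects/my-project/locations/us-central1/keyRings/my-ring" // hypothetical

	policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resource})
	if err != nil {
		return err
	}

	// Append a binding and resend the policy otherwise unchanged; the fetched
	// etag rides along, so a concurrent modification fails instead of being clobbered.
	policy.Bindings = append(policy.Bindings, &iampb.Binding{
		Role:    "roles/cloudkms.cryptoKeyEncrypter",
		Members: []string{"serviceAccount:app@my-project.iam.gserviceaccount.com"},
	})

	_, err = client.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{
		Resource: resource,
		Policy:   policy,
	})
	return err
}
```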
+func (c *autokeyAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.TestIamPermissionsResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *autokeyAdminRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go new file mode 100644 index 000000000..0cd015059 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go @@ -0,0 +1,1405 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package kms + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newAutokeyClientHook clientHook + +// AutokeyCallOptions contains the retry settings for each method of AutokeyClient. 
+type AutokeyCallOptions struct { + CreateKeyHandle []gax.CallOption + GetKeyHandle []gax.CallOption + ListKeyHandles []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + GetOperation []gax.CallOption +} + +func defaultAutokeyGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAutokeyCallOptions() *AutokeyCallOptions { + return &AutokeyCallOptions{ + CreateKeyHandle: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + GetKeyHandle: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListKeyHandles: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +func defaultAutokeyRESTCallOptions() *AutokeyCallOptions { + return &AutokeyCallOptions{ + CreateKeyHandle: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + GetKeyHandle: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + ListKeyHandles: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +// internalAutokeyClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API. 
+type internalAutokeyClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + CreateKeyHandle(context.Context, *kmspb.CreateKeyHandleRequest, ...gax.CallOption) (*CreateKeyHandleOperation, error) + CreateKeyHandleOperation(name string) *CreateKeyHandleOperation + GetKeyHandle(context.Context, *kmspb.GetKeyHandleRequest, ...gax.CallOption) (*kmspb.KeyHandle, error) + ListKeyHandles(context.Context, *kmspb.ListKeyHandlesRequest, ...gax.CallOption) *KeyHandleIterator + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// AutokeyClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new +// CryptoKeys, ready for Customer Managed +// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this +// feature is modeled around a KeyHandle +// resource: creating a KeyHandle in a resource +// project and given location triggers Cloud KMS Autokey to provision a +// CryptoKey in the configured key project and +// the same location. +// +// Prior to use in a given resource project, +// UpdateAutokeyConfig +// should have been called on an ancestor folder, setting the key project where +// Cloud KMS Autokey should create new +// CryptoKeys. See documentation for additional +// prerequisites. To check what key project, if any, is currently configured on +// a resource project’s ancestor folder, see +// ShowEffectiveAutokeyConfig. +type AutokeyClient struct { + // The internal transport-dependent client. + internalClient internalAutokeyClient + + // The call options for this service. + CallOptions *AutokeyCallOptions + + // LROClient is used internally to handle long-running operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AutokeyClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AutokeyClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *AutokeyClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// CreateKeyHandle creates a new KeyHandle, triggering the +// provisioning of a new CryptoKey for CMEK +// use with the given resource type in the configured key project and the same +// location. GetOperation should be used to resolve +// the resulting long-running operation and get the resulting +// KeyHandle and +// CryptoKey. +func (c *AutokeyClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) { + return c.internalClient.CreateKeyHandle(ctx, req, opts...) +} + +// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name. +// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process. +func (c *AutokeyClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation { + return c.internalClient.CreateKeyHandleOperation(name) +} + +// GetKeyHandle returns the KeyHandle. +func (c *AutokeyClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) { + return c.internalClient.GetKeyHandle(ctx, req, opts...) +} + +// ListKeyHandles lists KeyHandles. +func (c *AutokeyClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { + return c.internalClient.ListKeyHandles(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *AutokeyClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *AutokeyClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *AutokeyClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *AutokeyClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *AutokeyClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// GetOperation is a utility method from google.longrunning.Operations. 
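Taken together, these wrappers give the usual call shape; a minimal end-to-end sketch of creating a KeyHandle and waiting on the resulting long-running operation, assuming application default credentials, with placeholder resource names and an empty KeyHandle message kept only for brevity:

```
package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	ctx := context.Background()
	c, err := kms.NewAutokeyClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	op, err := c.CreateKeyHandle(ctx, &kmspb.CreateKeyHandleRequest{
		// Placeholder parent and ID; fill in the KeyHandle fields as required.
		Parent:      "projects/my-project/locations/us-central1",
		KeyHandleId: "my-key-handle",
		KeyHandle:   &kmspb.KeyHandle{},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Wait blocks until the operation resolves to the provisioned KeyHandle.
	kh, err := op.Wait(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(kh.GetName())
}
```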
+func (c *AutokeyClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// autokeyGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type autokeyGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing AutokeyClient + CallOptions **AutokeyCallOptions + + // The gRPC API client. + autokeyClient kmspb.AutokeyClient + + // LROClient is used internally to handle long-running operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient **lroauto.OperationsClient + + operationsClient longrunningpb.OperationsClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewAutokeyClient creates a new autokey client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new +// CryptoKeys, ready for Customer Managed +// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this +// feature is modeled around a KeyHandle +// resource: creating a KeyHandle in a resource +// project and given location triggers Cloud KMS Autokey to provision a +// CryptoKey in the configured key project and +// the same location. +// +// Prior to use in a given resource project, +// UpdateAutokeyConfig +// should have been called on an ancestor folder, setting the key project where +// Cloud KMS Autokey should create new +// CryptoKeys. See documentation for additional +// prerequisites. To check what key project, if any, is currently configured on +// a resource project’s ancestor folder, see +// ShowEffectiveAutokeyConfig. +func NewAutokeyClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) { + clientOpts := defaultAutokeyGRPCClientOptions() + if newAutokeyClientHook != nil { + hookOpts, err := newAutokeyClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := AutokeyClient{CallOptions: defaultAutokeyCallOptions()} + + c := &autokeyGRPCClient{ + connPool: connPool, + autokeyClient: kmspb.NewAutokeyClient(connPool), + CallOptions: &client.CallOptions, + operationsClient: longrunningpb.NewOperationsClient(connPool), + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool)) + if err != nil { + // This error "should not happen", since we are just reusing old connection pool + // and never actually need to dial. + // If this does happen, we could leak connp. 
However, we cannot close conn: + // If the user invoked the constructor with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO: investigate error conditions. + return nil, err + } + c.LROClient = &client.LROClient + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *autokeyGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *autokeyGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *autokeyGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type autokeyRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // LROClient is used internally to handle long-running operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient **lroauto.OperationsClient + + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string + + // Points back to the CallOptions field of the containing AutokeyClient + CallOptions **AutokeyCallOptions +} + +// NewAutokeyRESTClient creates a new autokey rest client. +// +// Provides interfaces for using Cloud KMS +// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new +// CryptoKeys, ready for Customer Managed +// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this +// feature is modeled around a KeyHandle +// resource: creating a KeyHandle in a resource +// project and given location triggers Cloud KMS Autokey to provision a +// CryptoKey in the configured key project and +// the same location. +// +// Prior to use in a given resource project, +// UpdateAutokeyConfig +// should have been called on an ancestor folder, setting the key project where +// Cloud KMS Autokey should create new +// CryptoKeys. See documentation for additional +// prerequisites. To check what key project, if any, is currently configured on +// a resource project’s ancestor folder, see +// ShowEffectiveAutokeyConfig. +func NewAutokeyRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) { + clientOpts := append(defaultAutokeyRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultAutokeyRESTCallOptions() + c := &autokeyRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + lroOpts := []option.ClientOption{ + option.WithHTTPClient(httpClient), + option.WithEndpoint(endpoint), + } + opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...) 
+ if err != nil { + return nil, err + } + c.LROClient = &opClient + + return &AutokeyClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultAutokeyRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"), + internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *autokeyRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *autokeyRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *autokeyRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *autokeyGRPCClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateKeyHandle[0:len((*c.CallOptions).CreateKeyHandle):len((*c.CallOptions).CreateKeyHandle)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyClient.CreateKeyHandle(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateKeyHandleOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, resp), + }, nil +} + +func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...) + var resp *kmspb.KeyHandle + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyClient.GetKeyHandle(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...) + it := &KeyHandleIterator{} + req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) { + resp := &kmspb.ListKeyHandlesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetKeyHandles(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) 
+ it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *autokeyGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateKeyHandle creates a new KeyHandle, triggering the +// provisioning of a new CryptoKey for CMEK +// use with the given resource type in the configured key project and the same +// location. GetOperation should be used to resolve +// the resulting long-running operation and get the resulting +// KeyHandle and +// CryptoKey. +func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetKeyHandle() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetKeyHandleId() != "" { + params.Add("keyHandleId", fmt.Sprintf("%v", req.GetKeyHandleId())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + + override := fmt.Sprintf("/v1/%s", resp.GetName()) + return &CreateKeyHandleOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, resp), + pollPath: override, + }, nil +} + +// GetKeyHandle returns the KeyHandle. 
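The REST methods below mirror the gRPC ones one-for-one behind the same *AutokeyClient wrapper, so switching transports is only a constructor change. A minimal sketch of reading an IAM policy over HTTP/JSON; the resource name is a placeholder:

```
package main

import (
	"context"
	"log"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	kms "cloud.google.com/go/kms/apiv1"
)

func main() {
	ctx := context.Background()
	// Same wrapper type as NewAutokeyClient, but HTTP/JSON underneath.
	c, err := kms.NewAutokeyRESTClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	policy, err := c.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		// Placeholder resource name.
		Resource: "projects/my-project/locations/us-central1/keyRings/my-key-ring",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("policy has %d bindings", len(policy.GetBindings()))
}
```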
+func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.KeyHandle{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListKeyHandles lists KeyHandles. +func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator { + it := &KeyHandleIterator{} + req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) { + resp := &kmspb.ListKeyHandlesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) 
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetKeyHandles(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetLocation gets information about a location. +func (c *autokeyRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &locationpb.Location{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListLocations lists information about the supported locations for this service. 
+func (c *autokeyRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *autokeyRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetOptions().GetRequestedPolicyVersion() != 0 { + params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) 
+ hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *autokeyRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. 
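Because of that fail-open behavior, callers simply inspect which of the requested permissions come back in the response. A minimal sketch; the resource name and permission string are illustrative only:

```
package main

import (
	"context"
	"log"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	kms "cloud.google.com/go/kms/apiv1"
)

func main() {
	ctx := context.Background()
	c, err := kms.NewAutokeyClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	resp, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    "projects/my-project/locations/us-central1/keyRings/my-key-ring",
		Permissions: []string{"cloudkms.cryptoKeys.create"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Only the permissions the caller actually holds are returned.
	log.Println(resp.GetPermissions())
}
```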
+func (c *autokeyRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.TestIamPermissionsResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *autokeyRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name. +// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process. +func (c *autokeyGRPCClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation { + return &CreateKeyHandleOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name. +// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process. +func (c *autokeyRESTClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation { + override := fmt.Sprintf("/v1/%s", name) + return &CreateKeyHandleOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}), + pollPath: override, + } +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go new file mode 100644 index 000000000..fa594ed81 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go @@ -0,0 +1,421 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package kms + +import ( + "context" + "time" + + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + locationpb "google.golang.org/genproto/googleapis/cloud/location" +) + +// CreateKeyHandleOperation manages a long-running operation from CreateKeyHandle. +type CreateKeyHandleOperation struct { + lro *longrunning.Operation + pollPath string +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateKeyHandleOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) { + opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...) 
+ var resp kmspb.KeyHandle + if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateKeyHandleOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) { + opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...) + var resp kmspb.KeyHandle + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateKeyHandleOperation) Metadata() (*kmspb.CreateKeyHandleMetadata, error) { + var meta kmspb.CreateKeyHandleMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateKeyHandleOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateKeyHandleOperation) Name() string { + return op.lro.Name() +} + +// CryptoKeyIterator manages a stream of *kmspb.CryptoKey. +type CryptoKeyIterator struct { + items []*kmspb.CryptoKey + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
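That Next / iterator.Done contract is shared by every iterator in this package; a minimal sketch of draining the KeyHandleIterator returned by ListKeyHandles, assuming application default credentials and a placeholder parent:

```
package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := kms.NewAutokeyClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListKeyHandles(ctx, &kmspb.ListKeyHandlesRequest{
		Parent: "projects/my-project/locations/us-central1", // placeholder
	})
	for {
		kh, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(kh.GetName())
	}
}
```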
+func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) { + var item *kmspb.CryptoKey + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion. +type CryptoKeyVersionIterator struct { + items []*kmspb.CryptoKeyVersion + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) { + var item *kmspb.CryptoKeyVersion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *CryptoKeyVersionIterator) bufLen() int { + return len(it.items) +} + +func (it *CryptoKeyVersionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// EkmConnectionIterator manages a stream of *kmspb.EkmConnection. +type EkmConnectionIterator struct { + items []*kmspb.EkmConnection + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) { + var item *kmspb.EkmConnection + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *EkmConnectionIterator) bufLen() int { + return len(it.items) +} + +func (it *EkmConnectionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ImportJobIterator manages a stream of *kmspb.ImportJob. +type ImportJobIterator struct { + items []*kmspb.ImportJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ImportJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) { + var item *kmspb.ImportJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ImportJobIterator) bufLen() int { + return len(it.items) +} + +func (it *ImportJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// KeyHandleIterator manages a stream of *kmspb.KeyHandle. +type KeyHandleIterator struct { + items []*kmspb.KeyHandle + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyHandle, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *KeyHandleIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *KeyHandleIterator) Next() (*kmspb.KeyHandle, error) { + var item *kmspb.KeyHandle + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *KeyHandleIterator) bufLen() int { + return len(it.items) +} + +func (it *KeyHandleIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// KeyRingIterator manages a stream of *kmspb.KeyRing. +type KeyRingIterator struct { + items []*kmspb.KeyRing + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *KeyRingIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) { + var item *kmspb.KeyRing + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *KeyRingIterator) bufLen() int { + return len(it.items) +} + +func (it *KeyRingIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// LocationIterator manages a stream of *locationpb.Location. +type LocationIterator struct { + items []*locationpb.Location + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LocationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *LocationIterator) Next() (*locationpb.Location, error) { + var item *locationpb.Location + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LocationIterator) bufLen() int { + return len(it.items) +} + +func (it *LocationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go new file mode 100644 index 000000000..7a8043ee9 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go @@ -0,0 +1,69 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package kms + +import ( + "iter" + + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + "github.com/googleapis/gax-go/v2/iterator" + locationpb "google.golang.org/genproto/googleapis/cloud/location" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CryptoKeyIterator) All() iter.Seq2[*kmspb.CryptoKey, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CryptoKeyVersionIterator) All() iter.Seq2[*kmspb.CryptoKeyVersion, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *EkmConnectionIterator) All() iter.Seq2[*kmspb.EkmConnection, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ImportJobIterator) All() iter.Seq2[*kmspb.ImportJob, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *KeyHandleIterator) All() iter.Seq2[*kmspb.KeyHandle, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *KeyRingIterator) All() iter.Seq2[*kmspb.KeyRing, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *LocationIterator) All() iter.Seq2[*locationpb.Location, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go new file mode 100644 index 000000000..432780fa5 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go @@ -0,0 +1,128 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package kms is an auto-generated package for the +// Cloud Key Management Service (KMS) API. +// +// Manages keys and performs cryptographic operations in a central cloud +// service, for direct use by other cloud resources and applications. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := kms.NewAutokeyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := kms.NewAutokeyClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &kmspb.CreateKeyHandleRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#CreateKeyHandleRequest. +// } +// op, err := c.CreateKeyHandle(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// resp, err := op.Wait(ctx) +// if err != nil { +// // TODO: Handle error. 
+// } +// // TODO: Use resp. +// _ = resp +// +// # Use of Context +// +// The ctx passed to NewAutokeyClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package kms // import "cloud.google.com/go/kms/apiv1" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloudkms", + } +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go new file mode 100644 index 000000000..411a37084 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go @@ -0,0 +1,1723 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package kms + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newEkmClientHook clientHook + +// EkmCallOptions contains the retry settings for each method of EkmClient. 
+type EkmCallOptions struct { + ListEkmConnections []gax.CallOption + GetEkmConnection []gax.CallOption + CreateEkmConnection []gax.CallOption + UpdateEkmConnection []gax.CallOption + GetEkmConfig []gax.CallOption + UpdateEkmConfig []gax.CallOption + VerifyConnectivity []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + GetOperation []gax.CallOption +} + +func defaultEkmGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultEkmCallOptions() *EkmCallOptions { + return &EkmCallOptions{ + ListEkmConnections: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetEkmConfig: []gax.CallOption{}, + UpdateEkmConfig: []gax.CallOption{}, + VerifyConnectivity: []gax.CallOption{}, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +func defaultEkmRESTCallOptions() *EkmCallOptions { + return &EkmCallOptions{ + ListEkmConnections: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + 
Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + CreateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + UpdateEkmConnection: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetEkmConfig: []gax.CallOption{}, + UpdateEkmConfig: []gax.CallOption{}, + VerifyConnectivity: []gax.CallOption{}, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +// internalEkmClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API. +type internalEkmClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListEkmConnections(context.Context, *kmspb.ListEkmConnectionsRequest, ...gax.CallOption) *EkmConnectionIterator + GetEkmConnection(context.Context, *kmspb.GetEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + CreateEkmConnection(context.Context, *kmspb.CreateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + UpdateEkmConnection(context.Context, *kmspb.UpdateEkmConnectionRequest, ...gax.CallOption) (*kmspb.EkmConnection, error) + GetEkmConfig(context.Context, *kmspb.GetEkmConfigRequest, ...gax.CallOption) (*kmspb.EkmConfig, error) + UpdateEkmConfig(context.Context, *kmspb.UpdateEkmConfigRequest, ...gax.CallOption) (*kmspb.EkmConfig, error) + VerifyConnectivity(context.Context, *kmspb.VerifyConnectivityRequest, ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// EkmClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// # Google Cloud Key Management EKM Service +// +// Manages external cryptographic keys and operations using those keys. +// Implements a REST model with the following objects: +// +// EkmConnection +type EkmClient struct { + // The internal transport-dependent client. + internalClient internalEkmClient + + // The call options for this service. + CallOptions *EkmCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. 
The user should invoke this when +// the client is no longer required. +func (c *EkmClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *EkmClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *EkmClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListEkmConnections lists EkmConnections. +func (c *EkmClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator { + return c.internalClient.ListEkmConnections(ctx, req, opts...) +} + +// GetEkmConnection returns metadata for a given +// EkmConnection. +func (c *EkmClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.GetEkmConnection(ctx, req, opts...) +} + +// CreateEkmConnection creates a new EkmConnection in a given +// Project and Location. +func (c *EkmClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.CreateEkmConnection(ctx, req, opts...) +} + +// UpdateEkmConnection updates an EkmConnection's metadata. +func (c *EkmClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + return c.internalClient.UpdateEkmConnection(ctx, req, opts...) +} + +// GetEkmConfig returns the EkmConfig singleton resource +// for a given project and location. +func (c *EkmClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + return c.internalClient.GetEkmConfig(ctx, req, opts...) +} + +// UpdateEkmConfig updates the EkmConfig singleton resource +// for a given project and location. +func (c *EkmClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + return c.internalClient.UpdateEkmConfig(ctx, req, opts...) +} + +// VerifyConnectivity verifies that Cloud KMS can successfully connect to the external key +// manager specified by an EkmConnection. +// If there is an error connecting to the EKM, this method returns a +// FAILED_PRECONDITION status containing structured information as described +// at https://cloud.google.com/kms/docs/reference/ekm_errors (at https://cloud.google.com/kms/docs/reference/ekm_errors). +func (c *EkmClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + return c.internalClient.VerifyConnectivity(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *EkmClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. 
+func (c *EkmClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *EkmClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *EkmClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *EkmClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *EkmClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// ekmGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type ekmGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing EkmClient + CallOptions **EkmCallOptions + + // The gRPC API client. + ekmClient kmspb.EkmServiceClient + + operationsClient longrunningpb.OperationsClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewEkmClient creates a new ekm service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// # Google Cloud Key Management EKM Service +// +// Manages external cryptographic keys and operations using those keys. +// Implements a REST model with the following objects: +// +// EkmConnection +func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, error) { + clientOpts := defaultEkmGRPCClientOptions() + if newEkmClientHook != nil { + hookOpts, err := newEkmClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) 
+ if err != nil { + return nil, err + } + client := EkmClient{CallOptions: defaultEkmCallOptions()} + + c := &ekmGRPCClient{ + connPool: connPool, + ekmClient: kmspb.NewEkmServiceClient(connPool), + CallOptions: &client.CallOptions, + operationsClient: longrunningpb.NewOperationsClient(connPool), + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *ekmGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ekmGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ekmGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type ekmRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string + + // Points back to the CallOptions field of the containing EkmClient + CallOptions **EkmCallOptions +} + +// NewEkmRESTClient creates a new ekm service rest client. +// +// # Google Cloud Key Management EKM Service +// +// Manages external cryptographic keys and operations using those keys. +// Implements a REST model with the following objects: +// +// EkmConnection +func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, error) { + clientOpts := append(defaultEkmRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultEkmRESTCallOptions() + c := &ekmRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &EkmClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultEkmRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"), + internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ekmRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ekmRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *ekmRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListEkmConnections[0:len((*c.CallOptions).ListEkmConnections):len((*c.CallOptions).ListEkmConnections)], opts...) + it := &EkmConnectionIterator{} + req = proto.Clone(req).(*kmspb.ListEkmConnectionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.EkmConnection, string, error) { + resp := &kmspb.ListEkmConnectionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.ListEkmConnections(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetEkmConnections(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetEkmConnection[0:len((*c.CallOptions).GetEkmConnection):len((*c.CallOptions).GetEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.GetEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
+ opts = append((*c.CallOptions).CreateEkmConnection[0:len((*c.CallOptions).CreateEkmConnection):len((*c.CallOptions).CreateEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.CreateEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_connection.name", url.QueryEscape(req.GetEkmConnection().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateEkmConnection[0:len((*c.CallOptions).UpdateEkmConnection):len((*c.CallOptions).UpdateEkmConnection)], opts...) + var resp *kmspb.EkmConnection + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.UpdateEkmConnection(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetEkmConfig[0:len((*c.CallOptions).GetEkmConfig):len((*c.CallOptions).GetEkmConfig)], opts...) + var resp *kmspb.EkmConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.GetEkmConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_config.name", url.QueryEscape(req.GetEkmConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateEkmConfig[0:len((*c.CallOptions).UpdateEkmConfig):len((*c.CallOptions).UpdateEkmConfig)], opts...) + var resp *kmspb.EkmConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.UpdateEkmConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).VerifyConnectivity[0:len((*c.CallOptions).VerifyConnectivity):len((*c.CallOptions).VerifyConnectivity)], opts...) 
+ var resp *kmspb.VerifyConnectivityResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.ekmClient.VerifyConnectivity(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *ekmGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *ekmGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListEkmConnections lists EkmConnections. 
+func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator { + it := &EkmConnectionIterator{} + req = proto.Clone(req).(*kmspb.ListEkmConnectionsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.EkmConnection, string, error) { + resp := &kmspb.ListEkmConnectionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/ekmConnections", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetOrderBy() != "" { + params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetEkmConnections(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetEkmConnection returns metadata for a given +// EkmConnection. +func (c *ekmRESTClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) 
+ opts = append((*c.CallOptions).GetEkmConnection[0:len((*c.CallOptions).GetEkmConnection):len((*c.CallOptions).GetEkmConnection)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConnection{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateEkmConnection creates a new EkmConnection in a given +// Project and Location. +func (c *ekmRESTClient) CreateEkmConnection(ctx context.Context, req *kmspb.CreateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetEkmConnection() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/ekmConnections", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + params.Add("ekmConnectionId", fmt.Sprintf("%v", req.GetEkmConnectionId())) + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).CreateEkmConnection[0:len((*c.CallOptions).CreateEkmConnection):len((*c.CallOptions).CreateEkmConnection)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConnection{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateEkmConnection updates an EkmConnection's metadata. 
+func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.UpdateEkmConnectionRequest, opts ...gax.CallOption) (*kmspb.EkmConnection, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetEkmConnection() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetEkmConnection().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + field, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_connection.name", url.QueryEscape(req.GetEkmConnection().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).UpdateEkmConnection[0:len((*c.CallOptions).UpdateEkmConnection):len((*c.CallOptions).UpdateEkmConnection)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConnection{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetEkmConfig returns the EkmConfig singleton resource +// for a given project and location. +func (c *ekmRESTClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetEkmConfig[0:len((*c.CallOptions).GetEkmConfig):len((*c.CallOptions).GetEkmConfig)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateEkmConfig updates the EkmConfig singleton resource +// for a given project and location. +func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEkmConfigRequest, opts ...gax.CallOption) (*kmspb.EkmConfig, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetEkmConfig() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetEkmConfig().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + field, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "ekm_config.name", url.QueryEscape(req.GetEkmConfig().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).UpdateEkmConfig[0:len((*c.CallOptions).UpdateEkmConfig):len((*c.CallOptions).UpdateEkmConfig)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EkmConfig{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// VerifyConnectivity verifies that Cloud KMS can successfully connect to the external key +// manager specified by an EkmConnection. +// If there is an error connecting to the EKM, this method returns a +// FAILED_PRECONDITION status containing structured information as described +// at https://cloud.google.com/kms/docs/reference/ekm_errors (at https://cloud.google.com/kms/docs/reference/ekm_errors). 
+func (c *ekmRESTClient) VerifyConnectivity(ctx context.Context, req *kmspb.VerifyConnectivityRequest, opts ...gax.CallOption) (*kmspb.VerifyConnectivityResponse, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:verifyConnectivity", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).VerifyConnectivity[0:len((*c.CallOptions).VerifyConnectivity):len((*c.CallOptions).VerifyConnectivity)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.VerifyConnectivityResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetLocation gets information about a location. +func (c *ekmRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &locationpb.Location{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListLocations lists information about the supported locations for this service. 
+func (c *ekmRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *ekmRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetOptions().GetRequestedPolicyVersion() != 0 { + params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) 
+ hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *ekmRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. 
+func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.TestIamPermissionsResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *ekmRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json new file mode 100644 index 000000000..47b250252 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json @@ -0,0 +1,715 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.cloud.kms.v1", + "libraryPackage": "cloud.google.com/go/kms/apiv1", + "services": { + "Autokey": { + "clients": { + "grpc": { + "libraryClient": "AutokeyClient", + "rpcs": { + "CreateKeyHandle": { + "methods": [ + "CreateKeyHandle" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetKeyHandle": { + "methods": [ + "GetKeyHandle" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListKeyHandles": { + "methods": [ + "ListKeyHandles" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + } + } + }, + "rest": { + "libraryClient": "AutokeyClient", + "rpcs": { + "CreateKeyHandle": { + "methods": [ + "CreateKeyHandle" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetKeyHandle": { + "methods": [ + "GetKeyHandle" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListKeyHandles": { + "methods": [ + "ListKeyHandles" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + } + } + } + } + }, + "AutokeyAdmin": { + "clients": { + "grpc": { + "libraryClient": "AutokeyAdminClient", + "rpcs": { + "GetAutokeyConfig": { + "methods": [ + "GetAutokeyConfig" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "ShowEffectiveAutokeyConfig": { + "methods": [ + "ShowEffectiveAutokeyConfig" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateAutokeyConfig": { + "methods": [ + "UpdateAutokeyConfig" + ] + } + } + }, + "rest": { + "libraryClient": "AutokeyAdminClient", + "rpcs": { + "GetAutokeyConfig": { + "methods": [ + "GetAutokeyConfig" + ] + }, + "GetIamPolicy": { + 
"methods": [ + "GetIamPolicy" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "ShowEffectiveAutokeyConfig": { + "methods": [ + "ShowEffectiveAutokeyConfig" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateAutokeyConfig": { + "methods": [ + "UpdateAutokeyConfig" + ] + } + } + } + } + }, + "EkmService": { + "clients": { + "grpc": { + "libraryClient": "EkmClient", + "rpcs": { + "CreateEkmConnection": { + "methods": [ + "CreateEkmConnection" + ] + }, + "GetEkmConfig": { + "methods": [ + "GetEkmConfig" + ] + }, + "GetEkmConnection": { + "methods": [ + "GetEkmConnection" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListEkmConnections": { + "methods": [ + "ListEkmConnections" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateEkmConfig": { + "methods": [ + "UpdateEkmConfig" + ] + }, + "UpdateEkmConnection": { + "methods": [ + "UpdateEkmConnection" + ] + }, + "VerifyConnectivity": { + "methods": [ + "VerifyConnectivity" + ] + } + } + }, + "rest": { + "libraryClient": "EkmClient", + "rpcs": { + "CreateEkmConnection": { + "methods": [ + "CreateEkmConnection" + ] + }, + "GetEkmConfig": { + "methods": [ + "GetEkmConfig" + ] + }, + "GetEkmConnection": { + "methods": [ + "GetEkmConnection" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListEkmConnections": { + "methods": [ + "ListEkmConnections" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateEkmConfig": { + "methods": [ + "UpdateEkmConfig" + ] + }, + "UpdateEkmConnection": { + "methods": [ + "UpdateEkmConnection" + ] + }, + "VerifyConnectivity": { + "methods": [ + "VerifyConnectivity" + ] + } + } + } + } + }, + "KeyManagementService": { + "clients": { + "grpc": { + "libraryClient": "KeyManagementClient", + "rpcs": { + "AsymmetricDecrypt": { + "methods": [ + "AsymmetricDecrypt" + ] + }, + "AsymmetricSign": { + "methods": [ + "AsymmetricSign" + ] + }, + "CreateCryptoKey": { + "methods": [ + "CreateCryptoKey" + ] + }, + "CreateCryptoKeyVersion": { + "methods": [ + "CreateCryptoKeyVersion" + ] + }, + "CreateImportJob": { + "methods": [ + "CreateImportJob" + ] + }, + "CreateKeyRing": { + "methods": [ + "CreateKeyRing" + ] + }, + "Decrypt": { + "methods": [ + "Decrypt" + ] + }, + "DestroyCryptoKeyVersion": { + "methods": [ + "DestroyCryptoKeyVersion" + ] + }, + "Encrypt": { + "methods": [ + "Encrypt" + ] + }, + "GenerateRandomBytes": { + "methods": [ + "GenerateRandomBytes" + ] + }, + "GetCryptoKey": { + "methods": [ + "GetCryptoKey" + ] + }, + "GetCryptoKeyVersion": { + "methods": [ + "GetCryptoKeyVersion" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetImportJob": { + "methods": [ + "GetImportJob" + ] + }, + "GetKeyRing": { + "methods": [ + "GetKeyRing" + ] + }, + 
"GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "GetPublicKey": { + "methods": [ + "GetPublicKey" + ] + }, + "ImportCryptoKeyVersion": { + "methods": [ + "ImportCryptoKeyVersion" + ] + }, + "ListCryptoKeyVersions": { + "methods": [ + "ListCryptoKeyVersions" + ] + }, + "ListCryptoKeys": { + "methods": [ + "ListCryptoKeys" + ] + }, + "ListImportJobs": { + "methods": [ + "ListImportJobs" + ] + }, + "ListKeyRings": { + "methods": [ + "ListKeyRings" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "MacSign": { + "methods": [ + "MacSign" + ] + }, + "MacVerify": { + "methods": [ + "MacVerify" + ] + }, + "RawDecrypt": { + "methods": [ + "RawDecrypt" + ] + }, + "RawEncrypt": { + "methods": [ + "RawEncrypt" + ] + }, + "RestoreCryptoKeyVersion": { + "methods": [ + "RestoreCryptoKeyVersion" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateCryptoKey": { + "methods": [ + "UpdateCryptoKey" + ] + }, + "UpdateCryptoKeyPrimaryVersion": { + "methods": [ + "UpdateCryptoKeyPrimaryVersion" + ] + }, + "UpdateCryptoKeyVersion": { + "methods": [ + "UpdateCryptoKeyVersion" + ] + } + } + }, + "rest": { + "libraryClient": "KeyManagementClient", + "rpcs": { + "AsymmetricDecrypt": { + "methods": [ + "AsymmetricDecrypt" + ] + }, + "AsymmetricSign": { + "methods": [ + "AsymmetricSign" + ] + }, + "CreateCryptoKey": { + "methods": [ + "CreateCryptoKey" + ] + }, + "CreateCryptoKeyVersion": { + "methods": [ + "CreateCryptoKeyVersion" + ] + }, + "CreateImportJob": { + "methods": [ + "CreateImportJob" + ] + }, + "CreateKeyRing": { + "methods": [ + "CreateKeyRing" + ] + }, + "Decrypt": { + "methods": [ + "Decrypt" + ] + }, + "DestroyCryptoKeyVersion": { + "methods": [ + "DestroyCryptoKeyVersion" + ] + }, + "Encrypt": { + "methods": [ + "Encrypt" + ] + }, + "GenerateRandomBytes": { + "methods": [ + "GenerateRandomBytes" + ] + }, + "GetCryptoKey": { + "methods": [ + "GetCryptoKey" + ] + }, + "GetCryptoKeyVersion": { + "methods": [ + "GetCryptoKeyVersion" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetImportJob": { + "methods": [ + "GetImportJob" + ] + }, + "GetKeyRing": { + "methods": [ + "GetKeyRing" + ] + }, + "GetLocation": { + "methods": [ + "GetLocation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "GetPublicKey": { + "methods": [ + "GetPublicKey" + ] + }, + "ImportCryptoKeyVersion": { + "methods": [ + "ImportCryptoKeyVersion" + ] + }, + "ListCryptoKeyVersions": { + "methods": [ + "ListCryptoKeyVersions" + ] + }, + "ListCryptoKeys": { + "methods": [ + "ListCryptoKeys" + ] + }, + "ListImportJobs": { + "methods": [ + "ListImportJobs" + ] + }, + "ListKeyRings": { + "methods": [ + "ListKeyRings" + ] + }, + "ListLocations": { + "methods": [ + "ListLocations" + ] + }, + "MacSign": { + "methods": [ + "MacSign" + ] + }, + "MacVerify": { + "methods": [ + "MacVerify" + ] + }, + "RawDecrypt": { + "methods": [ + "RawDecrypt" + ] + }, + "RawEncrypt": { + "methods": [ + "RawEncrypt" + ] + }, + "RestoreCryptoKeyVersion": { + "methods": [ + "RestoreCryptoKeyVersion" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateCryptoKey": { + "methods": [ + "UpdateCryptoKey" + ] + }, + "UpdateCryptoKeyPrimaryVersion": { + "methods": [ + "UpdateCryptoKeyPrimaryVersion" + ] + }, + 
"UpdateCryptoKeyVersion": { + "methods": [ + "UpdateCryptoKeyVersion" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/iam.go b/vendor/cloud.google.com/go/kms/apiv1/iam.go new file mode 100644 index 000000000..a86b08a6c --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/iam.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kms + +import ( + "cloud.google.com/go/iam" + "cloud.google.com/go/kms/apiv1/kmspb" +) + +// KeyRingIAM returns a handle to inspect and change permissions of a KeyRing. +// +// Deprecated: Please use ResourceIAM and provide the KeyRing.Name as input. +func (c *KeyManagementClient) KeyRingIAM(keyRing *kmspb.KeyRing) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), keyRing.Name) +} + +// CryptoKeyIAM returns a handle to inspect and change permissions of a CryptoKey. +// +// Deprecated: Please use ResourceIAM and provide the CryptoKey.Name as input. +func (c *KeyManagementClient) CryptoKeyIAM(cryptoKey *kmspb.CryptoKey) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), cryptoKey.Name) +} + +// ResourceIAM returns a handle to inspect and change permissions of the resource +// indicated by the given resource path. +func (c *KeyManagementClient) ResourceIAM(resourcePath string) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), resourcePath) +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go new file mode 100644 index 000000000..285df9b61 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -0,0 +1,4532 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package kms + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + kmspb "cloud.google.com/go/kms/apiv1/kmspb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + locationpb "google.golang.org/genproto/googleapis/cloud/location" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newKeyManagementClientHook clientHook + +// KeyManagementCallOptions contains the retry settings for each method of KeyManagementClient. +type KeyManagementCallOptions struct { + ListKeyRings []gax.CallOption + ListCryptoKeys []gax.CallOption + ListCryptoKeyVersions []gax.CallOption + ListImportJobs []gax.CallOption + GetKeyRing []gax.CallOption + GetCryptoKey []gax.CallOption + GetCryptoKeyVersion []gax.CallOption + GetPublicKey []gax.CallOption + GetImportJob []gax.CallOption + CreateKeyRing []gax.CallOption + CreateCryptoKey []gax.CallOption + CreateCryptoKeyVersion []gax.CallOption + ImportCryptoKeyVersion []gax.CallOption + CreateImportJob []gax.CallOption + UpdateCryptoKey []gax.CallOption + UpdateCryptoKeyVersion []gax.CallOption + UpdateCryptoKeyPrimaryVersion []gax.CallOption + DestroyCryptoKeyVersion []gax.CallOption + RestoreCryptoKeyVersion []gax.CallOption + Encrypt []gax.CallOption + Decrypt []gax.CallOption + RawEncrypt []gax.CallOption + RawDecrypt []gax.CallOption + AsymmetricSign []gax.CallOption + AsymmetricDecrypt []gax.CallOption + MacSign []gax.CallOption + MacVerify []gax.CallOption + GenerateRandomBytes []gax.CallOption + GetLocation []gax.CallOption + ListLocations []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + GetOperation []gax.CallOption +} + +func defaultKeyManagementGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultKeyManagementCallOptions() *KeyManagementCallOptions { + return &KeyManagementCallOptions{ + ListKeyRings: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListCryptoKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + 
Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListCryptoKeyVersions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListImportJobs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetPublicKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + ImportCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + CreateImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * 
time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateCryptoKeyPrimaryVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DestroyCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + RestoreCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Encrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Decrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + RawEncrypt: []gax.CallOption{}, + RawDecrypt: []gax.CallOption{}, + AsymmetricSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + AsymmetricDecrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + MacSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + MacVerify: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) 
+ }), + }, + GenerateRandomBytes: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions { + return &KeyManagementCallOptions{ + ListKeyRings: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + ListCryptoKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + ListCryptoKeyVersions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + ListImportJobs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetPublicKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + 
http.StatusGatewayTimeout) + }), + }, + CreateKeyRing: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + CreateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + CreateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + ImportCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + }, + CreateImportJob: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + UpdateCryptoKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + UpdateCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + UpdateCryptoKeyPrimaryVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + DestroyCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + RestoreCryptoKeyVersion: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + Encrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + Decrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + RawEncrypt: []gax.CallOption{}, + RawDecrypt: []gax.CallOption{}, + 
AsymmetricSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + AsymmetricDecrypt: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + MacSign: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + MacVerify: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GenerateRandomBytes: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout) + }), + }, + GetLocation: []gax.CallOption{}, + ListLocations: []gax.CallOption{}, + GetIamPolicy: []gax.CallOption{}, + SetIamPolicy: []gax.CallOption{}, + TestIamPermissions: []gax.CallOption{}, + GetOperation: []gax.CallOption{}, + } +} + +// internalKeyManagementClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API. 
+type internalKeyManagementClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListKeyRings(context.Context, *kmspb.ListKeyRingsRequest, ...gax.CallOption) *KeyRingIterator + ListCryptoKeys(context.Context, *kmspb.ListCryptoKeysRequest, ...gax.CallOption) *CryptoKeyIterator + ListCryptoKeyVersions(context.Context, *kmspb.ListCryptoKeyVersionsRequest, ...gax.CallOption) *CryptoKeyVersionIterator + ListImportJobs(context.Context, *kmspb.ListImportJobsRequest, ...gax.CallOption) *ImportJobIterator + GetKeyRing(context.Context, *kmspb.GetKeyRingRequest, ...gax.CallOption) (*kmspb.KeyRing, error) + GetCryptoKey(context.Context, *kmspb.GetCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + GetCryptoKeyVersion(context.Context, *kmspb.GetCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + GetPublicKey(context.Context, *kmspb.GetPublicKeyRequest, ...gax.CallOption) (*kmspb.PublicKey, error) + GetImportJob(context.Context, *kmspb.GetImportJobRequest, ...gax.CallOption) (*kmspb.ImportJob, error) + CreateKeyRing(context.Context, *kmspb.CreateKeyRingRequest, ...gax.CallOption) (*kmspb.KeyRing, error) + CreateCryptoKey(context.Context, *kmspb.CreateCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + CreateCryptoKeyVersion(context.Context, *kmspb.CreateCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + ImportCryptoKeyVersion(context.Context, *kmspb.ImportCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + CreateImportJob(context.Context, *kmspb.CreateImportJobRequest, ...gax.CallOption) (*kmspb.ImportJob, error) + UpdateCryptoKey(context.Context, *kmspb.UpdateCryptoKeyRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + UpdateCryptoKeyVersion(context.Context, *kmspb.UpdateCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + UpdateCryptoKeyPrimaryVersion(context.Context, *kmspb.UpdateCryptoKeyPrimaryVersionRequest, ...gax.CallOption) (*kmspb.CryptoKey, error) + DestroyCryptoKeyVersion(context.Context, *kmspb.DestroyCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + RestoreCryptoKeyVersion(context.Context, *kmspb.RestoreCryptoKeyVersionRequest, ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) + Encrypt(context.Context, *kmspb.EncryptRequest, ...gax.CallOption) (*kmspb.EncryptResponse, error) + Decrypt(context.Context, *kmspb.DecryptRequest, ...gax.CallOption) (*kmspb.DecryptResponse, error) + RawEncrypt(context.Context, *kmspb.RawEncryptRequest, ...gax.CallOption) (*kmspb.RawEncryptResponse, error) + RawDecrypt(context.Context, *kmspb.RawDecryptRequest, ...gax.CallOption) (*kmspb.RawDecryptResponse, error) + AsymmetricSign(context.Context, *kmspb.AsymmetricSignRequest, ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) + AsymmetricDecrypt(context.Context, *kmspb.AsymmetricDecryptRequest, ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) + MacSign(context.Context, *kmspb.MacSignRequest, ...gax.CallOption) (*kmspb.MacSignResponse, error) + MacVerify(context.Context, *kmspb.MacVerifyRequest, ...gax.CallOption) (*kmspb.MacVerifyResponse, error) + GenerateRandomBytes(context.Context, *kmspb.GenerateRandomBytesRequest, ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) + GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error) + ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) 
*LocationIterator + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// KeyManagementClient is a client for interacting with Cloud Key Management Service (KMS) API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// # Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// KeyRing +// +// CryptoKey +// +// CryptoKeyVersion +// +// ImportJob +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +type KeyManagementClient struct { + // The internal transport-dependent client. + internalClient internalKeyManagementClient + + // The call options for this service. + CallOptions *KeyManagementCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *KeyManagementClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *KeyManagementClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *KeyManagementClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListKeyRings lists KeyRings. +func (c *KeyManagementClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + return c.internalClient.ListKeyRings(ctx, req, opts...) +} + +// ListCryptoKeys lists CryptoKeys. +func (c *KeyManagementClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + return c.internalClient.ListCryptoKeys(ctx, req, opts...) +} + +// ListCryptoKeyVersions lists CryptoKeyVersions. +func (c *KeyManagementClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + return c.internalClient.ListCryptoKeyVersions(ctx, req, opts...) +} + +// ListImportJobs lists ImportJobs. +func (c *KeyManagementClient) ListImportJobs(ctx context.Context, req *kmspb.ListImportJobsRequest, opts ...gax.CallOption) *ImportJobIterator { + return c.internalClient.ListImportJobs(ctx, req, opts...) +} + +// GetKeyRing returns metadata for a given KeyRing. +func (c *KeyManagementClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + return c.internalClient.GetKeyRing(ctx, req, opts...) +} + +// GetCryptoKey returns metadata for a given CryptoKey, as +// well as its primary +// CryptoKeyVersion. 
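The wrapper methods above all delegate to the transport-specific internal client, and the list methods return GAPIC iterators rather than raw responses. A minimal, hypothetical sketch of consuming one of them follows; the parent resource name is a placeholder.

```
// Sketch only: paging through ListKeyRings with the standard GAPIC iterator.
package main

import (
	"context"
	"fmt"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder parent, not a value from this patch.
	it := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{
		Parent: "projects/p/locations/global",
	})
	for {
		kr, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(kr.GetName())
	}
}
```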
+func (c *KeyManagementClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.GetCryptoKey(ctx, req, opts...) +} + +// GetCryptoKeyVersion returns metadata for a given +// CryptoKeyVersion. +func (c *KeyManagementClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.GetCryptoKeyVersion(ctx, req, opts...) +} + +// GetPublicKey returns the public key for the given +// CryptoKeyVersion. The +// CryptoKey.purpose must be +// ASYMMETRIC_SIGN +// or +// ASYMMETRIC_DECRYPT. +func (c *KeyManagementClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + return c.internalClient.GetPublicKey(ctx, req, opts...) +} + +// GetImportJob returns metadata for a given ImportJob. +func (c *KeyManagementClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + return c.internalClient.GetImportJob(ctx, req, opts...) +} + +// CreateKeyRing create a new KeyRing in a given Project and +// Location. +func (c *KeyManagementClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + return c.internalClient.CreateKeyRing(ctx, req, opts...) +} + +// CreateCryptoKey create a new CryptoKey within a +// KeyRing. +// +// CryptoKey.purpose and +// CryptoKey.version_template.algorithm +// are required. +func (c *KeyManagementClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.CreateCryptoKey(ctx, req, opts...) +} + +// CreateCryptoKeyVersion create a new CryptoKeyVersion in a +// CryptoKey. +// +// The server will assign the next sequential id. If unset, +// state will be set to +// ENABLED. +func (c *KeyManagementClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.CreateCryptoKeyVersion(ctx, req, opts...) +} + +// ImportCryptoKeyVersion import wrapped key material into a +// CryptoKeyVersion. +// +// All requests must specify a CryptoKey. If +// a CryptoKeyVersion is additionally +// specified in the request, key material will be reimported into that +// version. Otherwise, a new version will be created, and will be assigned the +// next sequential id within the CryptoKey. +func (c *KeyManagementClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.ImportCryptoKeyVersion(ctx, req, opts...) +} + +// CreateImportJob create a new ImportJob within a +// KeyRing. +// +// ImportJob.import_method is +// required. +func (c *KeyManagementClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + return c.internalClient.CreateImportJob(ctx, req, opts...) +} + +// UpdateCryptoKey update a CryptoKey. +func (c *KeyManagementClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.UpdateCryptoKey(ctx, req, opts...) +} + +// UpdateCryptoKeyVersion update a CryptoKeyVersion's +// metadata. 
+// +// state may be changed between +// ENABLED +// and +// DISABLED +// using this method. See +// DestroyCryptoKeyVersion +// and +// RestoreCryptoKeyVersion +// to move between other states. +func (c *KeyManagementClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.UpdateCryptoKeyVersion(ctx, req, opts...) +} + +// UpdateCryptoKeyPrimaryVersion update the version of a CryptoKey that +// will be used in +// Encrypt. +// +// Returns an error if called on a key whose purpose is not +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + return c.internalClient.UpdateCryptoKeyPrimaryVersion(ctx, req, opts...) +} + +// DestroyCryptoKeyVersion schedule a CryptoKeyVersion for +// destruction. +// +// Upon calling this method, +// CryptoKeyVersion.state will +// be set to +// DESTROY_SCHEDULED, +// and destroy_time will +// be set to the time +// destroy_scheduled_duration +// in the future. At that time, the +// state will automatically +// change to +// DESTROYED, +// and the key material will be irrevocably destroyed. +// +// Before the +// destroy_time is +// reached, +// RestoreCryptoKeyVersion +// may be called to reverse the process. +func (c *KeyManagementClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.DestroyCryptoKeyVersion(ctx, req, opts...) +} + +// RestoreCryptoKeyVersion restore a CryptoKeyVersion in the +// DESTROY_SCHEDULED +// state. +// +// Upon restoration of the CryptoKeyVersion, +// state will be set to +// DISABLED, +// and destroy_time will +// be cleared. +func (c *KeyManagementClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + return c.internalClient.RestoreCryptoKeyVersion(ctx, req, opts...) +} + +// Encrypt encrypts data, so that it can only be recovered by a call to +// Decrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + return c.internalClient.Encrypt(ctx, req, opts...) +} + +// Decrypt decrypts data that was protected by +// Encrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. +func (c *KeyManagementClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + return c.internalClient.Decrypt(ctx, req, opts...) +} + +// RawEncrypt encrypts data using portable cryptographic primitives. Most users should +// choose Encrypt and +// Decrypt rather than +// their raw counterparts. The +// CryptoKey.purpose must be +// RAW_ENCRYPT_DECRYPT. +func (c *KeyManagementClient) RawEncrypt(ctx context.Context, req *kmspb.RawEncryptRequest, opts ...gax.CallOption) (*kmspb.RawEncryptResponse, error) { + return c.internalClient.RawEncrypt(ctx, req, opts...) +} + +// RawDecrypt decrypts data that was originally encrypted using a raw cryptographic +// mechanism. The CryptoKey.purpose +// must be +// RAW_ENCRYPT_DECRYPT. 
+func (c *KeyManagementClient) RawDecrypt(ctx context.Context, req *kmspb.RawDecryptRequest, opts ...gax.CallOption) (*kmspb.RawDecryptResponse, error) { + return c.internalClient.RawDecrypt(ctx, req, opts...) +} + +// AsymmetricSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_SIGN, producing a signature that can be verified with the public +// key retrieved from +// GetPublicKey. +func (c *KeyManagementClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + return c.internalClient.AsymmetricSign(ctx, req, opts...) +} + +// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from +// GetPublicKey +// corresponding to a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_DECRYPT. +func (c *KeyManagementClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + return c.internalClient.AsymmetricDecrypt(ctx, req, opts...) +} + +// MacSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose MAC, +// producing a tag that can be verified by another source with the same key. +func (c *KeyManagementClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { + return c.internalClient.MacSign(ctx, req, opts...) +} + +// MacVerify verifies MAC tag using a +// CryptoKeyVersion with +// CryptoKey.purpose MAC, and returns +// a response that indicates whether or not the verification was successful. +func (c *KeyManagementClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { + return c.internalClient.MacVerify(ctx, req, opts...) +} + +// GenerateRandomBytes generate random bytes using the Cloud KMS randomness source in the provided +// location. +func (c *KeyManagementClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { + return c.internalClient.GenerateRandomBytes(ctx, req, opts...) +} + +// GetLocation gets information about a location. +func (c *KeyManagementClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + return c.internalClient.GetLocation(ctx, req, opts...) +} + +// ListLocations lists information about the supported locations for this service. +func (c *KeyManagementClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + return c.internalClient.ListLocations(ctx, req, opts...) +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *KeyManagementClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *KeyManagementClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) 
+} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *KeyManagementClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// GetOperation is a utility method from google.longrunning.Operations. +func (c *KeyManagementClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// keyManagementGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type keyManagementGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing KeyManagementClient + CallOptions **KeyManagementCallOptions + + // The gRPC API client. + keyManagementClient kmspb.KeyManagementServiceClient + + operationsClient longrunningpb.OperationsClient + + iamPolicyClient iampb.IAMPolicyClient + + locationsClient locationpb.LocationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewKeyManagementClient creates a new key management service client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// # Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// KeyRing +// +// CryptoKey +// +// CryptoKeyVersion +// +// ImportJob +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (*KeyManagementClient, error) { + clientOpts := defaultKeyManagementGRPCClientOptions() + if newKeyManagementClientHook != nil { + hookOpts, err := newKeyManagementClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := KeyManagementClient{CallOptions: defaultKeyManagementCallOptions()} + + c := &keyManagementGRPCClient{ + connPool: connPool, + keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool), + CallOptions: &client.CallOptions, + operationsClient: longrunningpb.NewOperationsClient(connPool), + iamPolicyClient: iampb.NewIAMPolicyClient(connPool), + locationsClient: locationpb.NewLocationsClient(connPool), + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *keyManagementGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *keyManagementGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *keyManagementGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type keyManagementRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string + + // Points back to the CallOptions field of the containing KeyManagementClient + CallOptions **KeyManagementCallOptions +} + +// NewKeyManagementRESTClient creates a new key management service rest client. +// +// # Google Cloud Key Management Service +// +// Manages cryptographic keys and operations using those keys. Implements a REST +// model with the following objects: +// +// KeyRing +// +// CryptoKey +// +// CryptoKeyVersion +// +// ImportJob +// +// If you are using manual gRPC libraries, see +// Using gRPC with Cloud KMS (at https://cloud.google.com/kms/docs/grpc). +func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption) (*KeyManagementClient, error) { + clientOpts := append(defaultKeyManagementRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultKeyManagementRESTCallOptions() + c := &keyManagementRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &KeyManagementClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultKeyManagementRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"), + internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *keyManagementRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *keyManagementRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *keyManagementRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *keyManagementGRPCClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListKeyRings[0:len((*c.CallOptions).ListKeyRings):len((*c.CallOptions).ListKeyRings)], opts...) + it := &KeyRingIterator{} + req = proto.Clone(req).(*kmspb.ListKeyRingsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyRing, string, error) { + resp := &kmspb.ListKeyRingsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListKeyRings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetKeyRings(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListCryptoKeys[0:len((*c.CallOptions).ListCryptoKeys):len((*c.CallOptions).ListCryptoKeys)], opts...) + it := &CryptoKeyIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeysRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKey, string, error) { + resp := &kmspb.ListCryptoKeysResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetCryptoKeys(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListCryptoKeyVersions[0:len((*c.CallOptions).ListCryptoKeyVersions):len((*c.CallOptions).ListCryptoKeyVersions)], opts...) + it := &CryptoKeyVersionIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeyVersionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKeyVersion, string, error) { + resp := &kmspb.ListCryptoKeyVersionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListCryptoKeyVersions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetCryptoKeyVersions(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb.ListImportJobsRequest, opts ...gax.CallOption) *ImportJobIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListImportJobs[0:len((*c.CallOptions).ListImportJobs):len((*c.CallOptions).ListImportJobs)], opts...) + it := &ImportJobIterator{} + req = proto.Clone(req).(*kmspb.ListImportJobsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.ImportJob, string, error) { + resp := &kmspb.ListImportJobsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ListImportJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetImportJobs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetKeyRing[0:len((*c.CallOptions).GetKeyRing):len((*c.CallOptions).GetKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetCryptoKey[0:len((*c.CallOptions).GetCryptoKey):len((*c.CallOptions).GetCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetCryptoKeyVersion[0:len((*c.CallOptions).GetCryptoKeyVersion):len((*c.CallOptions).GetCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetPublicKey[0:len((*c.CallOptions).GetPublicKey):len((*c.CallOptions).GetPublicKey)], opts...) + var resp *kmspb.PublicKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetPublicKey(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetImportJob[0:len((*c.CallOptions).GetImportJob):len((*c.CallOptions).GetImportJob)], opts...) + var resp *kmspb.ImportJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GetImportJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateKeyRing[0:len((*c.CallOptions).CreateKeyRing):len((*c.CallOptions).CreateKeyRing)], opts...) + var resp *kmspb.KeyRing + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateKeyRing(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateCryptoKey[0:len((*c.CallOptions).CreateCryptoKey):len((*c.CallOptions).CreateCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateCryptoKeyVersion[0:len((*c.CallOptions).CreateCryptoKeyVersion):len((*c.CallOptions).CreateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ImportCryptoKeyVersion[0:len((*c.CallOptions).ImportCryptoKeyVersion):len((*c.CallOptions).ImportCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.ImportCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateImportJob[0:len((*c.CallOptions).CreateImportJob):len((*c.CallOptions).CreateImportJob)], opts...) + var resp *kmspb.ImportJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.CreateImportJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key.name", url.QueryEscape(req.GetCryptoKey().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKey[0:len((*c.CallOptions).UpdateCryptoKey):len((*c.CallOptions).UpdateCryptoKey)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key_version.name", url.QueryEscape(req.GetCryptoKeyVersion().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKeyVersion[0:len((*c.CallOptions).UpdateCryptoKeyVersion):len((*c.CallOptions).UpdateCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKeyPrimaryVersion[0:len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion):len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion)], opts...) + var resp *kmspb.CryptoKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.UpdateCryptoKeyPrimaryVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DestroyCryptoKeyVersion[0:len((*c.CallOptions).DestroyCryptoKeyVersion):len((*c.CallOptions).DestroyCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.DestroyCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RestoreCryptoKeyVersion[0:len((*c.CallOptions).RestoreCryptoKeyVersion):len((*c.CallOptions).RestoreCryptoKeyVersion)], opts...) + var resp *kmspb.CryptoKeyVersion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.RestoreCryptoKeyVersion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).Encrypt[0:len((*c.CallOptions).Encrypt):len((*c.CallOptions).Encrypt)], opts...) + var resp *kmspb.EncryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Encrypt(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).Decrypt[0:len((*c.CallOptions).Decrypt):len((*c.CallOptions).Decrypt)], opts...) + var resp *kmspb.DecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.Decrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) RawEncrypt(ctx context.Context, req *kmspb.RawEncryptRequest, opts ...gax.CallOption) (*kmspb.RawEncryptResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RawEncrypt[0:len((*c.CallOptions).RawEncrypt):len((*c.CallOptions).RawEncrypt)], opts...) + var resp *kmspb.RawEncryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.RawEncrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) RawDecrypt(ctx context.Context, req *kmspb.RawDecryptRequest, opts ...gax.CallOption) (*kmspb.RawDecryptResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RawDecrypt[0:len((*c.CallOptions).RawDecrypt):len((*c.CallOptions).RawDecrypt)], opts...) + var resp *kmspb.RawDecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.RawDecrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).AsymmetricSign[0:len((*c.CallOptions).AsymmetricSign):len((*c.CallOptions).AsymmetricSign)], opts...) + var resp *kmspb.AsymmetricSignResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricSign(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).AsymmetricDecrypt[0:len((*c.CallOptions).AsymmetricDecrypt):len((*c.CallOptions).AsymmetricDecrypt)], opts...) + var resp *kmspb.AsymmetricDecryptResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.AsymmetricDecrypt(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).MacSign[0:len((*c.CallOptions).MacSign):len((*c.CallOptions).MacSign)], opts...) + var resp *kmspb.MacSignResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.MacSign(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).MacVerify[0:len((*c.CallOptions).MacVerify):len((*c.CallOptions).MacVerify)], opts...) + var resp *kmspb.MacVerifyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.MacVerify(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "location", url.QueryEscape(req.GetLocation()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GenerateRandomBytes[0:len((*c.CallOptions).GenerateRandomBytes):len((*c.CallOptions).GenerateRandomBytes)], opts...) + var resp *kmspb.GenerateRandomBytesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.keyManagementClient.GenerateRandomBytes(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + var resp *locationpb.Location + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...) + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *keyManagementGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *keyManagementGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListKeyRings lists KeyRings. 
+func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator { + it := &KeyRingIterator{} + req = proto.Clone(req).(*kmspb.ListKeyRingsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyRing, string, error) { + resp := &kmspb.ListKeyRingsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/keyRings", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetOrderBy() != "" { + params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetKeyRings(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// ListCryptoKeys lists CryptoKeys. 
+func (c *keyManagementRESTClient) ListCryptoKeys(ctx context.Context, req *kmspb.ListCryptoKeysRequest, opts ...gax.CallOption) *CryptoKeyIterator { + it := &CryptoKeyIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeysRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKey, string, error) { + resp := &kmspb.ListCryptoKeysResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/cryptoKeys", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetOrderBy() != "" { + params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + if req.GetVersionView() != 0 { + params.Add("versionView", fmt.Sprintf("%v", req.GetVersionView())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetCryptoKeys(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// ListCryptoKeyVersions lists CryptoKeyVersions. 
+func (c *keyManagementRESTClient) ListCryptoKeyVersions(ctx context.Context, req *kmspb.ListCryptoKeyVersionsRequest, opts ...gax.CallOption) *CryptoKeyVersionIterator { + it := &CryptoKeyVersionIterator{} + req = proto.Clone(req).(*kmspb.ListCryptoKeyVersionsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.CryptoKeyVersion, string, error) { + resp := &kmspb.ListCryptoKeyVersionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/cryptoKeyVersions", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetOrderBy() != "" { + params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + if req.GetView() != 0 { + params.Add("view", fmt.Sprintf("%v", req.GetView())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetCryptoKeyVersions(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// ListImportJobs lists ImportJobs. 
+func (c *keyManagementRESTClient) ListImportJobs(ctx context.Context, req *kmspb.ListImportJobsRequest, opts ...gax.CallOption) *ImportJobIterator { + it := &ImportJobIterator{} + req = proto.Clone(req).(*kmspb.ListImportJobsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.ImportJob, string, error) { + resp := &kmspb.ListImportJobsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/importJobs", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetOrderBy() != "" { + params.Add("orderBy", fmt.Sprintf("%v", req.GetOrderBy())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetImportJobs(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetKeyRing returns metadata for a given KeyRing. +func (c *keyManagementRESTClient) GetKeyRing(ctx context.Context, req *kmspb.GetKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetKeyRing[0:len((*c.CallOptions).GetKeyRing):len((*c.CallOptions).GetKeyRing)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.KeyRing{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetCryptoKey returns metadata for a given CryptoKey, as +// well as its primary +// CryptoKeyVersion. +func (c *keyManagementRESTClient) GetCryptoKey(ctx context.Context, req *kmspb.GetCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetCryptoKey[0:len((*c.CallOptions).GetCryptoKey):len((*c.CallOptions).GetCryptoKey)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKey{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetCryptoKeyVersion returns metadata for a given +// CryptoKeyVersion. +func (c *keyManagementRESTClient) GetCryptoKeyVersion(ctx context.Context, req *kmspb.GetCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) 
+ opts = append((*c.CallOptions).GetCryptoKeyVersion[0:len((*c.CallOptions).GetCryptoKeyVersion):len((*c.CallOptions).GetCryptoKeyVersion)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetPublicKey returns the public key for the given +// CryptoKeyVersion. The +// CryptoKey.purpose must be +// ASYMMETRIC_SIGN +// or +// ASYMMETRIC_DECRYPT. +func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.GetPublicKeyRequest, opts ...gax.CallOption) (*kmspb.PublicKey, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/publicKey", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetPublicKey[0:len((*c.CallOptions).GetPublicKey):len((*c.CallOptions).GetPublicKey)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.PublicKey{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetImportJob returns metadata for a given ImportJob. +func (c *keyManagementRESTClient) GetImportJob(ctx context.Context, req *kmspb.GetImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) 
+ hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetImportJob[0:len((*c.CallOptions).GetImportJob):len((*c.CallOptions).GetImportJob)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.ImportJob{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateKeyRing create a new KeyRing in a given Project and +// Location. +func (c *keyManagementRESTClient) CreateKeyRing(ctx context.Context, req *kmspb.CreateKeyRingRequest, opts ...gax.CallOption) (*kmspb.KeyRing, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetKeyRing() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/keyRings", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + params.Add("keyRingId", fmt.Sprintf("%v", req.GetKeyRingId())) + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).CreateKeyRing[0:len((*c.CallOptions).CreateKeyRing):len((*c.CallOptions).CreateKeyRing)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.KeyRing{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateCryptoKey create a new CryptoKey within a +// KeyRing. +// +// CryptoKey.purpose and +// CryptoKey.version_template.algorithm +// are required. 
+func (c *keyManagementRESTClient) CreateCryptoKey(ctx context.Context, req *kmspb.CreateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetCryptoKey() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/cryptoKeys", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + params.Add("cryptoKeyId", fmt.Sprintf("%v", req.GetCryptoKeyId())) + if req.GetSkipInitialVersionCreation() { + params.Add("skipInitialVersionCreation", fmt.Sprintf("%v", req.GetSkipInitialVersionCreation())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).CreateCryptoKey[0:len((*c.CallOptions).CreateCryptoKey):len((*c.CallOptions).CreateCryptoKey)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKey{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateCryptoKeyVersion create a new CryptoKeyVersion in a +// CryptoKey. +// +// The server will assign the next sequential id. If unset, +// state will be set to +// ENABLED. +func (c *keyManagementRESTClient) CreateCryptoKeyVersion(ctx context.Context, req *kmspb.CreateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetCryptoKeyVersion() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/cryptoKeyVersions", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).CreateCryptoKeyVersion[0:len((*c.CallOptions).CreateCryptoKeyVersion):len((*c.CallOptions).CreateCryptoKeyVersion)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ImportCryptoKeyVersion import wrapped key material into a +// CryptoKeyVersion. +// +// All requests must specify a CryptoKey. If +// a CryptoKeyVersion is additionally +// specified in the request, key material will be reimported into that +// version. Otherwise, a new version will be created, and will be assigned the +// next sequential id within the CryptoKey. +func (c *keyManagementRESTClient) ImportCryptoKeyVersion(ctx context.Context, req *kmspb.ImportCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/cryptoKeyVersions:import", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).ImportCryptoKeyVersion[0:len((*c.CallOptions).ImportCryptoKeyVersion):len((*c.CallOptions).ImportCryptoKeyVersion)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// CreateImportJob create a new ImportJob within a +// KeyRing. +// +// ImportJob.import_method is +// required. 
+func (c *keyManagementRESTClient) CreateImportJob(ctx context.Context, req *kmspb.CreateImportJobRequest, opts ...gax.CallOption) (*kmspb.ImportJob, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetImportJob() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/importJobs", req.GetParent()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + params.Add("importJobId", fmt.Sprintf("%v", req.GetImportJobId())) + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).CreateImportJob[0:len((*c.CallOptions).CreateImportJob):len((*c.CallOptions).CreateImportJob)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.ImportJob{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateCryptoKey update a CryptoKey. +func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmspb.UpdateCryptoKeyRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetCryptoKey() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetCryptoKey().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + field, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key.name", url.QueryEscape(req.GetCryptoKey().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKey[0:len((*c.CallOptions).UpdateCryptoKey):len((*c.CallOptions).UpdateCryptoKey)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKey{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateCryptoKeyVersion update a CryptoKeyVersion's +// metadata. +// +// state may be changed between +// ENABLED +// and +// DISABLED +// using this method. See +// DestroyCryptoKeyVersion +// and +// RestoreCryptoKeyVersion +// to move between other states. +func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + body := req.GetCryptoKeyVersion() + jsonReq, err := m.Marshal(body) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetCryptoKeyVersion().GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetUpdateMask() != nil { + field, err := protojson.Marshal(req.GetUpdateMask()) + if err != nil { + return nil, err + } + params.Add("updateMask", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "crypto_key_version.name", url.QueryEscape(req.GetCryptoKeyVersion().GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKeyVersion[0:len((*c.CallOptions).UpdateCryptoKeyVersion):len((*c.CallOptions).UpdateCryptoKeyVersion)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// UpdateCryptoKeyPrimaryVersion update the version of a CryptoKey that +// will be used in +// Encrypt. +// +// Returns an error if called on a key whose purpose is not +// ENCRYPT_DECRYPT. 
+func (c *keyManagementRESTClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, req *kmspb.UpdateCryptoKeyPrimaryVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKey, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:updatePrimaryVersion", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).UpdateCryptoKeyPrimaryVersion[0:len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion):len((*c.CallOptions).UpdateCryptoKeyPrimaryVersion)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKey{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// DestroyCryptoKeyVersion schedule a CryptoKeyVersion for +// destruction. +// +// Upon calling this method, +// CryptoKeyVersion.state will +// be set to +// DESTROY_SCHEDULED, +// and destroy_time will +// be set to the time +// destroy_scheduled_duration +// in the future. At that time, the +// state will automatically +// change to +// DESTROYED, +// and the key material will be irrevocably destroyed. +// +// Before the +// destroy_time is +// reached, +// RestoreCryptoKeyVersion +// may be called to reverse the process. +func (c *keyManagementRESTClient) DestroyCryptoKeyVersion(ctx context.Context, req *kmspb.DestroyCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:destroy", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).DestroyCryptoKeyVersion[0:len((*c.CallOptions).DestroyCryptoKeyVersion):len((*c.CallOptions).DestroyCryptoKeyVersion)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// RestoreCryptoKeyVersion restore a CryptoKeyVersion in the +// DESTROY_SCHEDULED +// state. +// +// Upon restoration of the CryptoKeyVersion, +// state will be set to +// DISABLED, +// and destroy_time will +// be cleared. +func (c *keyManagementRESTClient) RestoreCryptoKeyVersion(ctx context.Context, req *kmspb.RestoreCryptoKeyVersionRequest, opts ...gax.CallOption) (*kmspb.CryptoKeyVersion, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:restore", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).RestoreCryptoKeyVersion[0:len((*c.CallOptions).RestoreCryptoKeyVersion):len((*c.CallOptions).RestoreCryptoKeyVersion)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.CryptoKeyVersion{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// Encrypt encrypts data, so that it can only be recovered by a call to +// Decrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. 
+func (c *keyManagementRESTClient) Encrypt(ctx context.Context, req *kmspb.EncryptRequest, opts ...gax.CallOption) (*kmspb.EncryptResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:encrypt", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).Encrypt[0:len((*c.CallOptions).Encrypt):len((*c.CallOptions).Encrypt)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.EncryptResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// Decrypt decrypts data that was protected by +// Encrypt. The +// CryptoKey.purpose must be +// ENCRYPT_DECRYPT. +func (c *keyManagementRESTClient) Decrypt(ctx context.Context, req *kmspb.DecryptRequest, opts ...gax.CallOption) (*kmspb.DecryptResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:decrypt", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).Decrypt[0:len((*c.CallOptions).Decrypt):len((*c.CallOptions).Decrypt)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.DecryptResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// RawEncrypt encrypts data using portable cryptographic primitives. Most users should +// choose Encrypt and +// Decrypt rather than +// their raw counterparts. The +// CryptoKey.purpose must be +// RAW_ENCRYPT_DECRYPT. +func (c *keyManagementRESTClient) RawEncrypt(ctx context.Context, req *kmspb.RawEncryptRequest, opts ...gax.CallOption) (*kmspb.RawEncryptResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:rawEncrypt", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).RawEncrypt[0:len((*c.CallOptions).RawEncrypt):len((*c.CallOptions).RawEncrypt)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.RawEncryptResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// RawDecrypt decrypts data that was originally encrypted using a raw cryptographic +// mechanism. The CryptoKey.purpose +// must be +// RAW_ENCRYPT_DECRYPT. 
+func (c *keyManagementRESTClient) RawDecrypt(ctx context.Context, req *kmspb.RawDecryptRequest, opts ...gax.CallOption) (*kmspb.RawDecryptResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:rawDecrypt", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).RawDecrypt[0:len((*c.CallOptions).RawDecrypt):len((*c.CallOptions).RawDecrypt)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.RawDecryptResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// AsymmetricSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_SIGN, producing a signature that can be verified with the public +// key retrieved from +// GetPublicKey. +func (c *keyManagementRESTClient) AsymmetricSign(ctx context.Context, req *kmspb.AsymmetricSignRequest, opts ...gax.CallOption) (*kmspb.AsymmetricSignResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:asymmetricSign", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).AsymmetricSign[0:len((*c.CallOptions).AsymmetricSign):len((*c.CallOptions).AsymmetricSign)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.AsymmetricSignResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// AsymmetricDecrypt decrypts data that was encrypted with a public key retrieved from +// GetPublicKey +// corresponding to a CryptoKeyVersion +// with CryptoKey.purpose +// ASYMMETRIC_DECRYPT. +func (c *keyManagementRESTClient) AsymmetricDecrypt(ctx context.Context, req *kmspb.AsymmetricDecryptRequest, opts ...gax.CallOption) (*kmspb.AsymmetricDecryptResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:asymmetricDecrypt", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).AsymmetricDecrypt[0:len((*c.CallOptions).AsymmetricDecrypt):len((*c.CallOptions).AsymmetricDecrypt)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.AsymmetricDecryptResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// MacSign signs data using a CryptoKeyVersion +// with CryptoKey.purpose MAC, +// producing a tag that can be verified by another source with the same key. 
+func (c *keyManagementRESTClient) MacSign(ctx context.Context, req *kmspb.MacSignRequest, opts ...gax.CallOption) (*kmspb.MacSignResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:macSign", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).MacSign[0:len((*c.CallOptions).MacSign):len((*c.CallOptions).MacSign)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.MacSignResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// MacVerify verifies MAC tag using a +// CryptoKeyVersion with +// CryptoKey.purpose MAC, and returns +// a response that indicates whether or not the verification was successful. +func (c *keyManagementRESTClient) MacVerify(ctx context.Context, req *kmspb.MacVerifyRequest, opts ...gax.CallOption) (*kmspb.MacVerifyResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:macVerify", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).MacVerify[0:len((*c.CallOptions).MacVerify):len((*c.CallOptions).MacVerify)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.MacVerifyResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GenerateRandomBytes generate random bytes using the Cloud KMS randomness source in the provided +// location. +func (c *keyManagementRESTClient) GenerateRandomBytes(ctx context.Context, req *kmspb.GenerateRandomBytesRequest, opts ...gax.CallOption) (*kmspb.GenerateRandomBytesResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:generateRandomBytes", req.GetLocation()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "location", url.QueryEscape(req.GetLocation()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GenerateRandomBytes[0:len((*c.CallOptions).GenerateRandomBytes):len((*c.CallOptions).GenerateRandomBytes)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &kmspb.GenerateRandomBytesResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetLocation gets information about a location. +func (c *keyManagementRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. 
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &locationpb.Location{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// ListLocations lists information about the supported locations for this service. +func (c *keyManagementRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator { + it := &LocationIterator{} + req = proto.Clone(req).(*locationpb.ListLocationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) { + resp := &locationpb.ListLocationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) 
+ if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetLocations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetIamPolicy gets the access control policy for a resource. Returns an empty policy +// if the resource exists and does not have a policy set. +func (c *keyManagementRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + if req.GetOptions().GetRequestedPolicyVersion() != 0 { + params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on the specified resource. Replaces +// any existing policy. +// +// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED +// errors. +func (c *keyManagementRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) 
+ hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.Policy{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// TestIamPermissions returns permissions that a caller has on the specified resource. If the +// resource does not exist, this will return an empty set of +// permissions, not a NOT_FOUND error. +// +// Note: This operation is designed to be used for building +// permission-aware UIs and command-line tools, not for authorization +// checking. This operation may “fail open” without warning. +func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return nil, err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &iampb.TestIamPermissionsResponse{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// GetOperation is a utility method from google.longrunning.Operations. 
+func (c *keyManagementRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + params.Add("$alt", "json;enum-encoding=int") + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go new file mode 100644 index 000000000..4b2a60c2c --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go @@ -0,0 +1,872 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/cloud/kms/v1/autokey.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for +// [Autokey.CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle]. 
+type CreateKeyHandleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the resource project and location to create the + // [KeyHandle][google.cloud.kms.v1.KeyHandle] in, e.g. + // `projects/{PROJECT_ID}/locations/{LOCATION}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Id of the [KeyHandle][google.cloud.kms.v1.KeyHandle]. Must be + // unique to the resource project and location. If not provided by the caller, + // a new UUID is used. + KeyHandleId string `protobuf:"bytes,2,opt,name=key_handle_id,json=keyHandleId,proto3" json:"key_handle_id,omitempty"` + // Required. [KeyHandle][google.cloud.kms.v1.KeyHandle] to create. + KeyHandle *KeyHandle `protobuf:"bytes,3,opt,name=key_handle,json=keyHandle,proto3" json:"key_handle,omitempty"` +} + +func (x *CreateKeyHandleRequest) Reset() { + *x = CreateKeyHandleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateKeyHandleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateKeyHandleRequest) ProtoMessage() {} + +func (x *CreateKeyHandleRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateKeyHandleRequest.ProtoReflect.Descriptor instead. +func (*CreateKeyHandleRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateKeyHandleRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateKeyHandleRequest) GetKeyHandleId() string { + if x != nil { + return x.KeyHandleId + } + return "" +} + +func (x *CreateKeyHandleRequest) GetKeyHandle() *KeyHandle { + if x != nil { + return x.KeyHandle + } + return nil +} + +// Request message for [GetKeyHandle][google.cloud.kms.v1.Autokey.GetKeyHandle]. +type GetKeyHandleRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle] resource, + // e.g. + // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetKeyHandleRequest) Reset() { + *x = GetKeyHandleRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetKeyHandleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyHandleRequest) ProtoMessage() {} + +func (x *GetKeyHandleRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetKeyHandleRequest.ProtoReflect.Descriptor instead. 
+func (*GetKeyHandleRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{1} +} + +func (x *GetKeyHandleRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Resource-oriented representation of a request to Cloud KMS Autokey and the +// resulting provisioning of a [CryptoKey][google.cloud.kms.v1.CryptoKey]. +type KeyHandle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle] + // resource, e.g. + // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. Name of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that has + // been provisioned for Customer Managed Encryption Key (CMEK) use in the + // [KeyHandle][google.cloud.kms.v1.KeyHandle] project and location for the + // requested resource type. The [CryptoKey][google.cloud.kms.v1.CryptoKey] + // project will reflect the value configured in the + // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] on the resource + // project's ancestor folder at the time of the + // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation. If more than one + // ancestor folder has a configured + // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig], the nearest of these + // configurations is used. + KmsKey string `protobuf:"bytes,3,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // Required. Indicates the resource type that the resulting + // [CryptoKey][google.cloud.kms.v1.CryptoKey] is meant to protect, e.g. + // `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource + // types. + ResourceTypeSelector string `protobuf:"bytes,4,opt,name=resource_type_selector,json=resourceTypeSelector,proto3" json:"resource_type_selector,omitempty"` +} + +func (x *KeyHandle) Reset() { + *x = KeyHandle{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyHandle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyHandle) ProtoMessage() {} + +func (x *KeyHandle) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyHandle.ProtoReflect.Descriptor instead. +func (*KeyHandle) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{2} +} + +func (x *KeyHandle) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeyHandle) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *KeyHandle) GetResourceTypeSelector() string { + if x != nil { + return x.ResourceTypeSelector + } + return "" +} + +// Metadata message for +// [CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle] long-running +// operation response. 
+type CreateKeyHandleMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateKeyHandleMetadata) Reset() { + *x = CreateKeyHandleMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateKeyHandleMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateKeyHandleMetadata) ProtoMessage() {} + +func (x *CreateKeyHandleMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateKeyHandleMetadata.ProtoReflect.Descriptor instead. +func (*CreateKeyHandleMetadata) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{3} +} + +// Request message for +// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles]. +type ListKeyHandlesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the resource project and location from which to list + // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g. + // `projects/{PROJECT_ID}/locations/{LOCATION}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [KeyHandles][google.cloud.kms.v1.KeyHandle] to include in the response. The + // service may return fewer than this value. Further + // [KeyHandles][google.cloud.kms.v1.KeyHandle] can subsequently be obtained by + // including the + // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token] + // in a subsequent request. If unspecified, at most + // 100 [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Filter to apply when listing + // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g. + // `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`. 
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (x *ListKeyHandlesRequest) Reset() { + *x = ListKeyHandlesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyHandlesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyHandlesRequest) ProtoMessage() {} + +func (x *ListKeyHandlesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyHandlesRequest.ProtoReflect.Descriptor instead. +func (*ListKeyHandlesRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{4} +} + +func (x *ListKeyHandlesRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListKeyHandlesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListKeyHandlesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListKeyHandlesRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +// Response message for +// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles]. +type ListKeyHandlesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resulting [KeyHandles][google.cloud.kms.v1.KeyHandle]. + KeyHandles []*KeyHandle `protobuf:"bytes,1,rep,name=key_handles,json=keyHandles,proto3" json:"key_handles,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListKeyHandlesRequest.page_token][google.cloud.kms.v1.ListKeyHandlesRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListKeyHandlesResponse) Reset() { + *x = ListKeyHandlesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyHandlesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyHandlesResponse) ProtoMessage() {} + +func (x *ListKeyHandlesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyHandlesResponse.ProtoReflect.Descriptor instead. 
+func (*ListKeyHandlesResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{5} +} + +func (x *ListKeyHandlesResponse) GetKeyHandles() []*KeyHandle { + if x != nil { + return x.KeyHandles + } + return nil +} + +func (x *ListKeyHandlesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +var File_google_cloud_kms_v1_autokey_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_autokey_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, + 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6b, 0x65, 0x79, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x09, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x54, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x04, 0x6e, 
0x61, 0x6d, + 0x65, 0x22, 0xa3, 0x02, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x03, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x16, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x7e, 0xea, 0x41, 0x7b, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, + 0x3f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x7d, + 0x2a, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0x09, 0x6b, 0x65, + 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x22, 0xbd, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x22, 0x81, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, + 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 
0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb4, 0x05, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x6f, 0x6b, + 0x65, 0x79, 0x12, 0xeb, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x8b, 0x01, 0xca, 0x41, 0x24, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x12, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x1f, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x22, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, + 0x12, 0x97, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xaa, 0x01, 0x0a, 0x0e, 0x4c, + 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x3f, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x54, 0x0a, + 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, + 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, + 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_autokey_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_autokey_proto_rawDescData = file_google_cloud_kms_v1_autokey_proto_rawDesc +) + +func file_google_cloud_kms_v1_autokey_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_autokey_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_autokey_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_proto_rawDescData) + }) + return file_google_cloud_kms_v1_autokey_proto_rawDescData +} + +var file_google_cloud_kms_v1_autokey_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_google_cloud_kms_v1_autokey_proto_goTypes = []any{ + (*CreateKeyHandleRequest)(nil), // 0: google.cloud.kms.v1.CreateKeyHandleRequest + (*GetKeyHandleRequest)(nil), // 1: google.cloud.kms.v1.GetKeyHandleRequest + (*KeyHandle)(nil), // 2: google.cloud.kms.v1.KeyHandle + (*CreateKeyHandleMetadata)(nil), // 3: google.cloud.kms.v1.CreateKeyHandleMetadata + (*ListKeyHandlesRequest)(nil), // 4: google.cloud.kms.v1.ListKeyHandlesRequest + (*ListKeyHandlesResponse)(nil), // 5: google.cloud.kms.v1.ListKeyHandlesResponse + (*longrunningpb.Operation)(nil), // 6: google.longrunning.Operation +} +var file_google_cloud_kms_v1_autokey_proto_depIdxs = []int32{ + 2, // 0: google.cloud.kms.v1.CreateKeyHandleRequest.key_handle:type_name -> google.cloud.kms.v1.KeyHandle + 2, // 1: google.cloud.kms.v1.ListKeyHandlesResponse.key_handles:type_name -> google.cloud.kms.v1.KeyHandle + 0, // 2: google.cloud.kms.v1.Autokey.CreateKeyHandle:input_type -> google.cloud.kms.v1.CreateKeyHandleRequest + 1, // 3: google.cloud.kms.v1.Autokey.GetKeyHandle:input_type -> google.cloud.kms.v1.GetKeyHandleRequest + 4, // 4: google.cloud.kms.v1.Autokey.ListKeyHandles:input_type -> google.cloud.kms.v1.ListKeyHandlesRequest + 6, // 5: 
google.cloud.kms.v1.Autokey.CreateKeyHandle:output_type -> google.longrunning.Operation + 2, // 6: google.cloud.kms.v1.Autokey.GetKeyHandle:output_type -> google.cloud.kms.v1.KeyHandle + 5, // 7: google.cloud.kms.v1.Autokey.ListKeyHandles:output_type -> google.cloud.kms.v1.ListKeyHandlesResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_autokey_proto_init() } +func file_google_cloud_kms_v1_autokey_proto_init() { + if File_google_cloud_kms_v1_autokey_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_autokey_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CreateKeyHandleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetKeyHandleRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*KeyHandle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateKeyHandleMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyHandlesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyHandlesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_autokey_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_autokey_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_autokey_proto_depIdxs, + MessageInfos: file_google_cloud_kms_v1_autokey_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_autokey_proto = out.File + file_google_cloud_kms_v1_autokey_proto_rawDesc = nil + file_google_cloud_kms_v1_autokey_proto_goTypes = nil + file_google_cloud_kms_v1_autokey_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AutokeyClient is the client API for Autokey service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AutokeyClient interface { + // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the + // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK + // use with the given resource type in the configured key project and the same + // location. [GetOperation][Operations.GetOperation] should be used to resolve + // the resulting long-running operation and get the resulting + // [KeyHandle][google.cloud.kms.v1.KeyHandle] and + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) + // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. + GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error) + // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle]. + ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error) +} + +type autokeyClient struct { + cc grpc.ClientConnInterface +} + +func NewAutokeyClient(cc grpc.ClientConnInterface) AutokeyClient { + return &autokeyClient{cc} +} + +func (c *autokeyClient) CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/CreateKeyHandle", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *autokeyClient) GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error) { + out := new(KeyHandle) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/GetKeyHandle", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *autokeyClient) ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error) { + out := new(ListKeyHandlesResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/ListKeyHandles", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AutokeyServer is the server API for Autokey service. +type AutokeyServer interface { + // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the + // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK + // use with the given resource type in the configured key project and the same + // location. [GetOperation][Operations.GetOperation] should be used to resolve + // the resulting long-running operation and get the resulting + // [KeyHandle][google.cloud.kms.v1.KeyHandle] and + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) + // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. + GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error) + // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle]. + ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error) +} + +// UnimplementedAutokeyServer can be embedded to have forward compatible implementations. 
+type UnimplementedAutokeyServer struct { +} + +func (*UnimplementedAutokeyServer) CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateKeyHandle not implemented") +} +func (*UnimplementedAutokeyServer) GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKeyHandle not implemented") +} +func (*UnimplementedAutokeyServer) ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListKeyHandles not implemented") +} + +func RegisterAutokeyServer(s *grpc.Server, srv AutokeyServer) { + s.RegisterService(&_Autokey_serviceDesc, srv) +} + +func _Autokey_CreateKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateKeyHandleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyServer).CreateKeyHandle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.Autokey/CreateKeyHandle", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyServer).CreateKeyHandle(ctx, req.(*CreateKeyHandleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Autokey_GetKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyHandleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyServer).GetKeyHandle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.Autokey/GetKeyHandle", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyServer).GetKeyHandle(ctx, req.(*GetKeyHandleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Autokey_ListKeyHandles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListKeyHandlesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyServer).ListKeyHandles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.Autokey/ListKeyHandles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyServer).ListKeyHandles(ctx, req.(*ListKeyHandlesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Autokey_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.Autokey", + HandlerType: (*AutokeyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateKeyHandle", + Handler: _Autokey_CreateKeyHandle_Handler, + }, + { + MethodName: "GetKeyHandle", + Handler: _Autokey_GetKeyHandle_Handler, + }, + { + MethodName: "ListKeyHandles", + Handler: _Autokey_ListKeyHandles_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/autokey.proto", +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go new file mode 100644 index 000000000..0d612bba5 --- /dev/null +++ 
b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go @@ -0,0 +1,815 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/cloud/kms/v1/autokey_admin.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The states AutokeyConfig can be in. +type AutokeyConfig_State int32 + +const ( + // The state of the AutokeyConfig is unspecified. + AutokeyConfig_STATE_UNSPECIFIED AutokeyConfig_State = 0 + // The AutokeyConfig is currently active. + AutokeyConfig_ACTIVE AutokeyConfig_State = 1 + // A previously configured key project has been deleted and the current + // AutokeyConfig is unusable. + AutokeyConfig_KEY_PROJECT_DELETED AutokeyConfig_State = 2 + // The AutokeyConfig is not yet initialized or has been reset to its default + // uninitialized state. + AutokeyConfig_UNINITIALIZED AutokeyConfig_State = 3 +) + +// Enum value maps for AutokeyConfig_State. +var ( + AutokeyConfig_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "KEY_PROJECT_DELETED", + 3: "UNINITIALIZED", + } + AutokeyConfig_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "KEY_PROJECT_DELETED": 2, + "UNINITIALIZED": 3, + } +) + +func (x AutokeyConfig_State) Enum() *AutokeyConfig_State { + p := new(AutokeyConfig_State) + *p = x + return p +} + +func (x AutokeyConfig_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AutokeyConfig_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0].Descriptor() +} + +func (AutokeyConfig_State) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0] +} + +func (x AutokeyConfig_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AutokeyConfig_State.Descriptor instead. +func (AutokeyConfig_State) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2, 0} +} + +// Request message for +// [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig]. 
+type UpdateAutokeyConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] with values to + // update. + AutokeyConfig *AutokeyConfig `protobuf:"bytes,1,opt,name=autokey_config,json=autokeyConfig,proto3" json:"autokey_config,omitempty"` + // Required. Masks which fields of the + // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] to update, e.g. + // `keyProject`. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateAutokeyConfigRequest) Reset() { + *x = UpdateAutokeyConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateAutokeyConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAutokeyConfigRequest) ProtoMessage() {} + +func (x *UpdateAutokeyConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAutokeyConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateAutokeyConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateAutokeyConfigRequest) GetAutokeyConfig() *AutokeyConfig { + if x != nil { + return x.AutokeyConfig + } + return nil +} + +func (x *UpdateAutokeyConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [GetAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig]. +type GetAutokeyConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] + // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAutokeyConfigRequest) Reset() { + *x = GetAutokeyConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAutokeyConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAutokeyConfigRequest) ProtoMessage() {} + +func (x *GetAutokeyConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAutokeyConfigRequest.ProtoReflect.Descriptor instead. +func (*GetAutokeyConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{1} +} + +func (x *GetAutokeyConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Cloud KMS Autokey configuration for a folder. 
+type AutokeyConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] + // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or + // `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new + // [CryptoKey][google.cloud.kms.v1.CryptoKey] when a + // [KeyHandle][google.cloud.kms.v1.KeyHandle] is created. On + // [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig], + // the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on + // this key project. Once configured, for Cloud KMS Autokey to function + // properly, this key project must have the Cloud KMS API activated and the + // Cloud KMS Service Agent for this key project must be granted the + // `cloudkms.admin` role (or pertinent permissions). A request with an empty + // key project field will clear the configuration. + KeyProject string `protobuf:"bytes,2,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"` + // Output only. The state for the AutokeyConfig. + State AutokeyConfig_State `protobuf:"varint,4,opt,name=state,proto3,enum=google.cloud.kms.v1.AutokeyConfig_State" json:"state,omitempty"` +} + +func (x *AutokeyConfig) Reset() { + *x = AutokeyConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AutokeyConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AutokeyConfig) ProtoMessage() {} + +func (x *AutokeyConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AutokeyConfig.ProtoReflect.Descriptor instead. +func (*AutokeyConfig) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2} +} + +func (x *AutokeyConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AutokeyConfig) GetKeyProject() string { + if x != nil { + return x.KeyProject + } + return "" +} + +func (x *AutokeyConfig) GetState() AutokeyConfig_State { + if x != nil { + return x.State + } + return AutokeyConfig_STATE_UNSPECIFIED +} + +// Request message for +// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig]. +type ShowEffectiveAutokeyConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the resource project to the show effective Cloud KMS + // Autokey configuration for. This may be helpful for interrogating the effect + // of nested folder configurations on a given resource project. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` +} + +func (x *ShowEffectiveAutokeyConfigRequest) Reset() { + *x = ShowEffectiveAutokeyConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShowEffectiveAutokeyConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShowEffectiveAutokeyConfigRequest) ProtoMessage() {} + +func (x *ShowEffectiveAutokeyConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShowEffectiveAutokeyConfigRequest.ProtoReflect.Descriptor instead. +func (*ShowEffectiveAutokeyConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{3} +} + +func (x *ShowEffectiveAutokeyConfigRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +// Response message for +// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig]. +type ShowEffectiveAutokeyConfigResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the key project configured in the resource project's folder + // ancestry. + KeyProject string `protobuf:"bytes,1,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"` +} + +func (x *ShowEffectiveAutokeyConfigResponse) Reset() { + *x = ShowEffectiveAutokeyConfigResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShowEffectiveAutokeyConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShowEffectiveAutokeyConfigResponse) ProtoMessage() {} + +func (x *ShowEffectiveAutokeyConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShowEffectiveAutokeyConfigResponse.ProtoReflect.Descriptor instead. 
+func (*ShowEffectiveAutokeyConfigResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{4} +} + +func (x *ShowEffectiveAutokeyConfigResponse) GetKeyProject() string { + if x != nil { + return x.KeyProject + } + return "" +} + +var File_google_cloud_kms_v1_autokey_admin_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, + 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0xd6, 0x02, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, + 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x43, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x56, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x4a, + 0x45, 0x43, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a, + 0x0d, 0x55, 0x4e, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x03, + 0x3a, 0x69, 0xea, 0x41, 0x66, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x66, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0x0e, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x32, 0x0d, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x70, 0x0a, 0x21, 0x53, + 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x45, 0x0a, + 0x22, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x32, 0xc8, 0x05, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xd2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, + 0x79, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x66, 0xda, 0x41, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x61, 0x75, + 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x97, 0x01, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, + 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x7d, 0x12, 0xd2, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, + 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, + 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x75, 
0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, + 0x59, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x41, 0x75, 0x74, 0x6f, + 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, + 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = file_google_cloud_kms_v1_autokey_admin_proto_rawDesc +) + +func file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_admin_proto_rawDescData) + }) + return file_google_cloud_kms_v1_autokey_admin_proto_rawDescData +} + +var file_google_cloud_kms_v1_autokey_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_cloud_kms_v1_autokey_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_cloud_kms_v1_autokey_admin_proto_goTypes = []any{ + (AutokeyConfig_State)(0), // 0: google.cloud.kms.v1.AutokeyConfig.State + (*UpdateAutokeyConfigRequest)(nil), // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest + (*GetAutokeyConfigRequest)(nil), // 2: google.cloud.kms.v1.GetAutokeyConfigRequest + (*AutokeyConfig)(nil), // 3: google.cloud.kms.v1.AutokeyConfig + (*ShowEffectiveAutokeyConfigRequest)(nil), // 4: google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest + (*ShowEffectiveAutokeyConfigResponse)(nil), // 5: google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse + (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask +} +var file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = []int32{ + 3, // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest.autokey_config:type_name -> google.cloud.kms.v1.AutokeyConfig + 6, // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 2: google.cloud.kms.v1.AutokeyConfig.state:type_name -> google.cloud.kms.v1.AutokeyConfig.State + 1, // 3: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:input_type -> google.cloud.kms.v1.UpdateAutokeyConfigRequest + 2, // 4: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:input_type -> google.cloud.kms.v1.GetAutokeyConfigRequest + 4, // 5: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:input_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest + 3, // 6: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig + 3, // 7: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig + 5, // 8: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:output_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse + 6, // [6:9] is the sub-list 
for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_autokey_admin_proto_init() } +func file_google_cloud_kms_v1_autokey_admin_proto_init() { + if File_google_cloud_kms_v1_autokey_admin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*UpdateAutokeyConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetAutokeyConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*AutokeyConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ShowEffectiveAutokeyConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ShowEffectiveAutokeyConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_autokey_admin_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_autokey_admin_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_autokey_admin_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_autokey_admin_proto_enumTypes, + MessageInfos: file_google_cloud_kms_v1_autokey_admin_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_autokey_admin_proto = out.File + file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = nil + file_google_cloud_kms_v1_autokey_admin_proto_goTypes = nil + file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AutokeyAdminClient is the client API for AutokeyAdmin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AutokeyAdminClient interface { + // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a + // folder. The caller must have both `cloudkms.autokeyConfigs.update` + // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` + // permission on the provided key project. 
A + // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's + // descendant projects will use this configuration to determine where to + // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) + // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a + // folder. + GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) + // Returns the effective Cloud KMS Autokey configuration for a given project. + ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error) +} + +type autokeyAdminClient struct { + cc grpc.ClientConnInterface +} + +func NewAutokeyAdminClient(cc grpc.ClientConnInterface) AutokeyAdminClient { + return &autokeyAdminClient{cc} +} + +func (c *autokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) { + out := new(AutokeyConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *autokeyAdminClient) GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) { + out := new(AutokeyConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *autokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error) { + out := new(ShowEffectiveAutokeyConfigResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AutokeyAdminServer is the server API for AutokeyAdmin service. +type AutokeyAdminServer interface { + // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a + // folder. The caller must have both `cloudkms.autokeyConfigs.update` + // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy` + // permission on the provided key project. A + // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's + // descendant projects will use this configuration to determine where to + // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error) + // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a + // folder. + GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error) + // Returns the effective Cloud KMS Autokey configuration for a given project. + ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error) +} + +// UnimplementedAutokeyAdminServer can be embedded to have forward compatible implementations. 
+type UnimplementedAutokeyAdminServer struct { +} + +func (*UnimplementedAutokeyAdminServer) UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateAutokeyConfig not implemented") +} +func (*UnimplementedAutokeyAdminServer) GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAutokeyConfig not implemented") +} +func (*UnimplementedAutokeyAdminServer) ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShowEffectiveAutokeyConfig not implemented") +} + +func RegisterAutokeyAdminServer(s *grpc.Server, srv AutokeyAdminServer) { + s.RegisterService(&_AutokeyAdmin_serviceDesc, srv) +} + +func _AutokeyAdmin_UpdateAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAutokeyConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, req.(*UpdateAutokeyConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AutokeyAdmin_GetAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAutokeyConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, req.(*GetAutokeyConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShowEffectiveAutokeyConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, req.(*ShowEffectiveAutokeyConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AutokeyAdmin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.AutokeyAdmin", + HandlerType: (*AutokeyAdminServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateAutokeyConfig", + Handler: _AutokeyAdmin_UpdateAutokeyConfig_Handler, + }, + { + MethodName: "GetAutokeyConfig", + Handler: _AutokeyAdmin_GetAutokeyConfig_Handler, + }, + { + MethodName: "ShowEffectiveAutokeyConfig", + Handler: 
_AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/autokey_admin.proto", +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go new file mode 100644 index 000000000..eaa41d032 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -0,0 +1,1965 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/cloud/kms/v1/ekm_service.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [KeyManagementMode][google.cloud.kms.v1.EkmConnection.KeyManagementMode] +// describes who can perform control plane cryptographic operations using this +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection_KeyManagementMode int32 + +const ( + // Not specified. + EkmConnection_KEY_MANAGEMENT_MODE_UNSPECIFIED EkmConnection_KeyManagementMode = 0 + // EKM-side key management operations on + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] must be initiated from + // the EKM directly and cannot be performed from Cloud KMS. This means that: + // * When creating a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with + // this + // + // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must + // supply the key path of pre-existing external key material that will be + // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // - Destruction of external key material cannot be requested via the + // Cloud KMS API and must be performed directly in the EKM. + // - Automatic rotation of key material is not supported. + EkmConnection_MANUAL EkmConnection_KeyManagementMode = 1 + // All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key + // management operations initiated from Cloud KMS. 
This means that: + // * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] + // is + // + // created, the EKM automatically generates new key material and a new + // key path. The caller cannot supply the key path of pre-existing + // external key material. + // - Destruction of external key material associated with this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by + // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion]. + // - Automatic rotation of key material is supported. + EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2 +) + +// Enum value maps for EkmConnection_KeyManagementMode. +var ( + EkmConnection_KeyManagementMode_name = map[int32]string{ + 0: "KEY_MANAGEMENT_MODE_UNSPECIFIED", + 1: "MANUAL", + 2: "CLOUD_KMS", + } + EkmConnection_KeyManagementMode_value = map[string]int32{ + "KEY_MANAGEMENT_MODE_UNSPECIFIED": 0, + "MANUAL": 1, + "CLOUD_KMS": 2, + } +) + +func (x EkmConnection_KeyManagementMode) Enum() *EkmConnection_KeyManagementMode { + p := new(EkmConnection_KeyManagementMode) + *p = x + return p +} + +func (x EkmConnection_KeyManagementMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EkmConnection_KeyManagementMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_ekm_service_proto_enumTypes[0].Descriptor() +} + +func (EkmConnection_KeyManagementMode) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_ekm_service_proto_enumTypes[0] +} + +func (x EkmConnection_KeyManagementMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EkmConnection_KeyManagementMode.Descriptor instead. +func (EkmConnection_KeyManagementMode) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8, 0} +} + +// Request message for +// [EkmService.ListEkmConnections][google.cloud.kms.v1.EkmService.ListEkmConnections]. +type ListEkmConnectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [EkmConnections][google.cloud.kms.v1.EkmConnection] to list, in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [EkmConnections][google.cloud.kms.v1.EkmConnection] to include in the + // response. Further [EkmConnections][google.cloud.kms.v1.EkmConnection] can + // subsequently be obtained by including the + // [ListEkmConnectionsResponse.next_page_token][google.cloud.kms.v1.ListEkmConnectionsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListEkmConnectionsResponse.next_page_token][google.cloud.kms.v1.ListEkmConnectionsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. 
For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListEkmConnectionsRequest) Reset() { + *x = ListEkmConnectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEkmConnectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEkmConnectionsRequest) ProtoMessage() {} + +func (x *ListEkmConnectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEkmConnectionsRequest.ProtoReflect.Descriptor instead. +func (*ListEkmConnectionsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListEkmConnectionsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListEkmConnectionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListEkmConnectionsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Response message for +// [EkmService.ListEkmConnections][google.cloud.kms.v1.EkmService.ListEkmConnections]. +type ListEkmConnectionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [EkmConnections][google.cloud.kms.v1.EkmConnection]. + EkmConnections []*EkmConnection `protobuf:"bytes,1,rep,name=ekm_connections,json=ekmConnections,proto3" json:"ekm_connections,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListEkmConnectionsRequest.page_token][google.cloud.kms.v1.ListEkmConnectionsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [EkmConnections][google.cloud.kms.v1.EkmConnection] + // that matched the query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListEkmConnectionsResponse) Reset() { + *x = ListEkmConnectionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEkmConnectionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEkmConnectionsResponse) ProtoMessage() {} + +func (x *ListEkmConnectionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEkmConnectionsResponse.ProtoReflect.Descriptor instead. +func (*ListEkmConnectionsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListEkmConnectionsResponse) GetEkmConnections() []*EkmConnection { + if x != nil { + return x.EkmConnections + } + return nil +} + +func (x *ListEkmConnectionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListEkmConnectionsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Request message for +// [EkmService.GetEkmConnection][google.cloud.kms.v1.EkmService.GetEkmConnection]. +type GetEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConnection.name] of the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEkmConnectionRequest) Reset() { + *x = GetEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEkmConnectionRequest) ProtoMessage() {} + +func (x *GetEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEkmConnectionRequest.ProtoReflect.Descriptor instead. +func (*GetEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{2} +} + +func (x *GetEkmConnectionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [EkmService.CreateEkmConnection][google.cloud.kms.v1.EkmService.CreateEkmConnection]. +type CreateEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [EkmConnection][google.cloud.kms.v1.EkmConnection], in the format + // `projects/*/locations/*`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a location and match the regular + // expression `[a-zA-Z0-9_-]{1,63}`. + EkmConnectionId string `protobuf:"bytes,2,opt,name=ekm_connection_id,json=ekmConnectionId,proto3" json:"ekm_connection_id,omitempty"` + // Required. An [EkmConnection][google.cloud.kms.v1.EkmConnection] with + // initial field values. + EkmConnection *EkmConnection `protobuf:"bytes,3,opt,name=ekm_connection,json=ekmConnection,proto3" json:"ekm_connection,omitempty"` +} + +func (x *CreateEkmConnectionRequest) Reset() { + *x = CreateEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEkmConnectionRequest) ProtoMessage() {} + +func (x *CreateEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEkmConnectionRequest.ProtoReflect.Descriptor instead. +func (*CreateEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateEkmConnectionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateEkmConnectionRequest) GetEkmConnectionId() string { + if x != nil { + return x.EkmConnectionId + } + return "" +} + +func (x *CreateEkmConnectionRequest) GetEkmConnection() *EkmConnection { + if x != nil { + return x.EkmConnection + } + return nil +} + +// Request message for +// [EkmService.UpdateEkmConnection][google.cloud.kms.v1.EkmService.UpdateEkmConnection]. +type UpdateEkmConnectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [EkmConnection][google.cloud.kms.v1.EkmConnection] with updated + // values. + EkmConnection *EkmConnection `protobuf:"bytes,1,opt,name=ekm_connection,json=ekmConnection,proto3" json:"ekm_connection,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateEkmConnectionRequest) Reset() { + *x = UpdateEkmConnectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEkmConnectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEkmConnectionRequest) ProtoMessage() {} + +func (x *UpdateEkmConnectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEkmConnectionRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEkmConnectionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateEkmConnectionRequest) GetEkmConnection() *EkmConnection { + if x != nil { + return x.EkmConnection + } + return nil +} + +func (x *UpdateEkmConnectionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [EkmService.GetEkmConfig][google.cloud.kms.v1.EkmService.GetEkmConfig]. +type GetEkmConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConfig.name] of the + // [EkmConfig][google.cloud.kms.v1.EkmConfig] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetEkmConfigRequest) Reset() { + *x = GetEkmConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEkmConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEkmConfigRequest) ProtoMessage() {} + +func (x *GetEkmConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEkmConfigRequest.ProtoReflect.Descriptor instead. +func (*GetEkmConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetEkmConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [EkmService.UpdateEkmConfig][google.cloud.kms.v1.EkmService.UpdateEkmConfig]. +type UpdateEkmConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [EkmConfig][google.cloud.kms.v1.EkmConfig] with updated values. + EkmConfig *EkmConfig `protobuf:"bytes,1,opt,name=ekm_config,json=ekmConfig,proto3" json:"ekm_config,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateEkmConfigRequest) Reset() { + *x = UpdateEkmConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateEkmConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateEkmConfigRequest) ProtoMessage() {} + +func (x *UpdateEkmConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEkmConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEkmConfigRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateEkmConfigRequest) GetEkmConfig() *EkmConfig { + if x != nil { + return x.EkmConfig + } + return nil +} + +func (x *UpdateEkmConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// A [Certificate][google.cloud.kms.v1.Certificate] represents an X.509 +// certificate used to authenticate HTTPS connections to EKM replicas. +type Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The raw certificate bytes in DER format. + RawDer []byte `protobuf:"bytes,1,opt,name=raw_der,json=rawDer,proto3" json:"raw_der,omitempty"` + // Output only. True if the certificate was parsed successfully. + Parsed bool `protobuf:"varint,2,opt,name=parsed,proto3" json:"parsed,omitempty"` + // Output only. The issuer distinguished name in RFC 2253 format. Only present + // if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + Issuer string `protobuf:"bytes,3,opt,name=issuer,proto3" json:"issuer,omitempty"` + // Output only. The subject distinguished name in RFC 2253 format. Only + // present if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + Subject string `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"` + // Output only. The subject Alternative DNS names. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + SubjectAlternativeDnsNames []string `protobuf:"bytes,5,rep,name=subject_alternative_dns_names,json=subjectAlternativeDnsNames,proto3" json:"subject_alternative_dns_names,omitempty"` + // Output only. The certificate is not valid before this time. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + NotBeforeTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=not_before_time,json=notBeforeTime,proto3" json:"not_before_time,omitempty"` + // Output only. The certificate is not valid after this time. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + NotAfterTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=not_after_time,json=notAfterTime,proto3" json:"not_after_time,omitempty"` + // Output only. The certificate serial number as a hex string. Only present if + // [parsed][google.cloud.kms.v1.Certificate.parsed] is true. + SerialNumber string `protobuf:"bytes,8,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // Output only. The SHA-256 certificate fingerprint as a hex string. Only + // present if [parsed][google.cloud.kms.v1.Certificate.parsed] is true. 
+ Sha256Fingerprint string `protobuf:"bytes,9,opt,name=sha256_fingerprint,json=sha256Fingerprint,proto3" json:"sha256_fingerprint,omitempty"` +} + +func (x *Certificate) Reset() { + *x = Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificate) ProtoMessage() {} + +func (x *Certificate) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. +func (*Certificate) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{7} +} + +func (x *Certificate) GetRawDer() []byte { + if x != nil { + return x.RawDer + } + return nil +} + +func (x *Certificate) GetParsed() bool { + if x != nil { + return x.Parsed + } + return false +} + +func (x *Certificate) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +func (x *Certificate) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *Certificate) GetSubjectAlternativeDnsNames() []string { + if x != nil { + return x.SubjectAlternativeDnsNames + } + return nil +} + +func (x *Certificate) GetNotBeforeTime() *timestamppb.Timestamp { + if x != nil { + return x.NotBeforeTime + } + return nil +} + +func (x *Certificate) GetNotAfterTime() *timestamppb.Timestamp { + if x != nil { + return x.NotAfterTime + } + return nil +} + +func (x *Certificate) GetSerialNumber() string { + if x != nil { + return x.SerialNumber + } + return "" +} + +func (x *Certificate) GetSha256Fingerprint() string { + if x != nil { + return x.Sha256Fingerprint + } + return "" +} + +// An [EkmConnection][google.cloud.kms.v1.EkmConnection] represents an +// individual EKM connection. It can be used for creating +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of +// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], as well as +// performing cryptographic operations using keys created within the +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] in the format + // `projects/*/locations/*/ekmConnections/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The time at which the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Optional. A list of + // [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where + // the EKM can be reached. There should be one ServiceResolver per EKM + // replica. Currently, only a single + // [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] is + // supported. 
+ ServiceResolvers []*EkmConnection_ServiceResolver `protobuf:"bytes,3,rep,name=service_resolvers,json=serviceResolvers,proto3" json:"service_resolvers,omitempty"` + // Optional. Etag of the currently stored + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + Etag string `protobuf:"bytes,5,opt,name=etag,proto3" json:"etag,omitempty"` + // Optional. Describes who can perform control plane operations on the EKM. If + // unset, this defaults to + // [MANUAL][google.cloud.kms.v1.EkmConnection.KeyManagementMode.MANUAL]. + KeyManagementMode EkmConnection_KeyManagementMode `protobuf:"varint,6,opt,name=key_management_mode,json=keyManagementMode,proto3,enum=google.cloud.kms.v1.EkmConnection_KeyManagementMode" json:"key_management_mode,omitempty"` + // Optional. Identifies the EKM Crypto Space that this + // [EkmConnection][google.cloud.kms.v1.EkmConnection] maps to. Note: This + // field is required if + // [KeyManagementMode][google.cloud.kms.v1.EkmConnection.KeyManagementMode] is + // [CLOUD_KMS][google.cloud.kms.v1.EkmConnection.KeyManagementMode.CLOUD_KMS]. + CryptoSpacePath string `protobuf:"bytes,7,opt,name=crypto_space_path,json=cryptoSpacePath,proto3" json:"crypto_space_path,omitempty"` +} + +func (x *EkmConnection) Reset() { + *x = EkmConnection{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConnection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConnection) ProtoMessage() {} + +func (x *EkmConnection) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConnection.ProtoReflect.Descriptor instead. +func (*EkmConnection) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8} +} + +func (x *EkmConnection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EkmConnection) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *EkmConnection) GetServiceResolvers() []*EkmConnection_ServiceResolver { + if x != nil { + return x.ServiceResolvers + } + return nil +} + +func (x *EkmConnection) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *EkmConnection) GetKeyManagementMode() EkmConnection_KeyManagementMode { + if x != nil { + return x.KeyManagementMode + } + return EkmConnection_KEY_MANAGEMENT_MODE_UNSPECIFIED +} + +func (x *EkmConnection) GetCryptoSpacePath() string { + if x != nil { + return x.CryptoSpacePath + } + return "" +} + +// An [EkmConfig][google.cloud.kms.v1.EkmConfig] is a singleton resource that +// represents configuration parameters that apply to all +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of +// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC] in a given +// project and location. +type EkmConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. 
The resource name for the + // [EkmConfig][google.cloud.kms.v1.EkmConfig] in the format + // `projects/*/locations/*/ekmConfig`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. Resource name of the default + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. Setting this field to + // the empty string removes the default. + DefaultEkmConnection string `protobuf:"bytes,2,opt,name=default_ekm_connection,json=defaultEkmConnection,proto3" json:"default_ekm_connection,omitempty"` +} + +func (x *EkmConfig) Reset() { + *x = EkmConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConfig) ProtoMessage() {} + +func (x *EkmConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConfig.ProtoReflect.Descriptor instead. +func (*EkmConfig) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{9} +} + +func (x *EkmConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EkmConfig) GetDefaultEkmConnection() string { + if x != nil { + return x.DefaultEkmConnection + } + return "" +} + +// Request message for +// [EkmService.VerifyConnectivity][google.cloud.kms.v1.EkmService.VerifyConnectivity]. +type VerifyConnectivityRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.EkmConnection.name] of the + // [EkmConnection][google.cloud.kms.v1.EkmConnection] to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *VerifyConnectivityRequest) Reset() { + *x = VerifyConnectivityRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyConnectivityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyConnectivityRequest) ProtoMessage() {} + +func (x *VerifyConnectivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyConnectivityRequest.ProtoReflect.Descriptor instead. +func (*VerifyConnectivityRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{10} +} + +func (x *VerifyConnectivityRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Response message for +// [EkmService.VerifyConnectivity][google.cloud.kms.v1.EkmService.VerifyConnectivity]. 
+type VerifyConnectivityResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VerifyConnectivityResponse) Reset() { + *x = VerifyConnectivityResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyConnectivityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyConnectivityResponse) ProtoMessage() {} + +func (x *VerifyConnectivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyConnectivityResponse.ProtoReflect.Descriptor instead. +func (*VerifyConnectivityResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{11} +} + +// A [ServiceResolver][google.cloud.kms.v1.EkmConnection.ServiceResolver] +// represents an EKM replica that can be reached within an +// [EkmConnection][google.cloud.kms.v1.EkmConnection]. +type EkmConnection_ServiceResolver struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the Service Directory service pointing to + // an EKM replica, in the format + // `projects/*/locations/*/namespaces/*/services/*`. + ServiceDirectoryService string `protobuf:"bytes,1,opt,name=service_directory_service,json=serviceDirectoryService,proto3" json:"service_directory_service,omitempty"` + // Optional. The filter applied to the endpoints of the resolved service. If + // no filter is specified, all endpoints will be considered. An endpoint + // will be chosen arbitrarily from the filtered list for each request. + // + // For endpoint filter syntax and examples, see + // https://cloud.google.com/service-directory/docs/reference/rpc/google.cloud.servicedirectory.v1#resolveservicerequest. + EndpointFilter string `protobuf:"bytes,2,opt,name=endpoint_filter,json=endpointFilter,proto3" json:"endpoint_filter,omitempty"` + // Required. The hostname of the EKM replica used at TLS and HTTP layers. + Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Required. A list of leaf server certificates used to authenticate HTTPS + // connections to the EKM replica. Currently, a maximum of 10 + // [Certificate][google.cloud.kms.v1.Certificate] is supported. 
+ ServerCertificates []*Certificate `protobuf:"bytes,4,rep,name=server_certificates,json=serverCertificates,proto3" json:"server_certificates,omitempty"` +} + +func (x *EkmConnection_ServiceResolver) Reset() { + *x = EkmConnection_ServiceResolver{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EkmConnection_ServiceResolver) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EkmConnection_ServiceResolver) ProtoMessage() {} + +func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EkmConnection_ServiceResolver.ProtoReflect.Descriptor instead. +func (*EkmConnection_ServiceResolver) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *EkmConnection_ServiceResolver) GetServiceDirectoryService() string { + if x != nil { + return x.ServiceDirectoryService + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetEndpointFilter() string { + if x != nil { + return x.EndpointFilter + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *EkmConnection_ServiceResolver) GetServerCertificates() []*Certificate { + if x != nil { + return x.ServerCertificates + } + return nil +} + +var File_google_cloud_kms_v1_ekm_service_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6b, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xe1, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, + 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, + 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, + 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x22, 0xb0, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, + 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, + 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x11, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x4e, 0x0a, 0x0e, 0x65, 0x6b, + 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x65, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x65, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0a, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x61, 0x77, 0x5f, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 
0x02, 0x52, 0x06, 0x72, 0x61, 0x77, 0x44, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x1b, 0x0a, + 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x46, 0x0a, 0x1d, 0x73, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1a, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6e, 0x6f, 0x74, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x6e, 0x6f, 0x74, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x6e, 0x6f, + 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x12, 0x73, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, + 0xf7, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x11, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, + 0x72, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b, + 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, + 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, + 0x53, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41, + 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 
0x4e, + 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, + 0x4d, 0x53, 0x10, 0x02, 0x3a, 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x47, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x63, 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2d, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, + 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x5e, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc, + 0x0b, 0x0a, 0x0a, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, + 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x74, 0xda, 0x41, 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, + 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x44, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, + 0x0c, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x7d, 0x12, 0xc3, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x22, 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, + 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x54, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, + 0x12, 0x45, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, + 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, + 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a, + 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, + 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, + 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, + 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_ekm_service_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_ekm_service_proto_rawDescData = file_google_cloud_kms_v1_ekm_service_proto_rawDesc +) + +func file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP() []byte { + 
file_google_cloud_kms_v1_ekm_service_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_ekm_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_ekm_service_proto_rawDescData) + }) + return file_google_cloud_kms_v1_ekm_service_proto_rawDescData +} + +var file_google_cloud_kms_v1_ekm_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_cloud_kms_v1_ekm_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_cloud_kms_v1_ekm_service_proto_goTypes = []any{ + (EkmConnection_KeyManagementMode)(0), // 0: google.cloud.kms.v1.EkmConnection.KeyManagementMode + (*ListEkmConnectionsRequest)(nil), // 1: google.cloud.kms.v1.ListEkmConnectionsRequest + (*ListEkmConnectionsResponse)(nil), // 2: google.cloud.kms.v1.ListEkmConnectionsResponse + (*GetEkmConnectionRequest)(nil), // 3: google.cloud.kms.v1.GetEkmConnectionRequest + (*CreateEkmConnectionRequest)(nil), // 4: google.cloud.kms.v1.CreateEkmConnectionRequest + (*UpdateEkmConnectionRequest)(nil), // 5: google.cloud.kms.v1.UpdateEkmConnectionRequest + (*GetEkmConfigRequest)(nil), // 6: google.cloud.kms.v1.GetEkmConfigRequest + (*UpdateEkmConfigRequest)(nil), // 7: google.cloud.kms.v1.UpdateEkmConfigRequest + (*Certificate)(nil), // 8: google.cloud.kms.v1.Certificate + (*EkmConnection)(nil), // 9: google.cloud.kms.v1.EkmConnection + (*EkmConfig)(nil), // 10: google.cloud.kms.v1.EkmConfig + (*VerifyConnectivityRequest)(nil), // 11: google.cloud.kms.v1.VerifyConnectivityRequest + (*VerifyConnectivityResponse)(nil), // 12: google.cloud.kms.v1.VerifyConnectivityResponse + (*EkmConnection_ServiceResolver)(nil), // 13: google.cloud.kms.v1.EkmConnection.ServiceResolver + (*fieldmaskpb.FieldMask)(nil), // 14: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp +} +var file_google_cloud_kms_v1_ekm_service_proto_depIdxs = []int32{ + 9, // 0: google.cloud.kms.v1.ListEkmConnectionsResponse.ekm_connections:type_name -> google.cloud.kms.v1.EkmConnection + 9, // 1: google.cloud.kms.v1.CreateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 9, // 2: google.cloud.kms.v1.UpdateEkmConnectionRequest.ekm_connection:type_name -> google.cloud.kms.v1.EkmConnection + 14, // 3: google.cloud.kms.v1.UpdateEkmConnectionRequest.update_mask:type_name -> google.protobuf.FieldMask + 10, // 4: google.cloud.kms.v1.UpdateEkmConfigRequest.ekm_config:type_name -> google.cloud.kms.v1.EkmConfig + 14, // 5: google.cloud.kms.v1.UpdateEkmConfigRequest.update_mask:type_name -> google.protobuf.FieldMask + 15, // 6: google.cloud.kms.v1.Certificate.not_before_time:type_name -> google.protobuf.Timestamp + 15, // 7: google.cloud.kms.v1.Certificate.not_after_time:type_name -> google.protobuf.Timestamp + 15, // 8: google.cloud.kms.v1.EkmConnection.create_time:type_name -> google.protobuf.Timestamp + 13, // 9: google.cloud.kms.v1.EkmConnection.service_resolvers:type_name -> google.cloud.kms.v1.EkmConnection.ServiceResolver + 0, // 10: google.cloud.kms.v1.EkmConnection.key_management_mode:type_name -> google.cloud.kms.v1.EkmConnection.KeyManagementMode + 8, // 11: google.cloud.kms.v1.EkmConnection.ServiceResolver.server_certificates:type_name -> google.cloud.kms.v1.Certificate + 1, // 12: google.cloud.kms.v1.EkmService.ListEkmConnections:input_type -> google.cloud.kms.v1.ListEkmConnectionsRequest + 3, // 13: google.cloud.kms.v1.EkmService.GetEkmConnection:input_type -> google.cloud.kms.v1.GetEkmConnectionRequest + 4, // 14: 
google.cloud.kms.v1.EkmService.CreateEkmConnection:input_type -> google.cloud.kms.v1.CreateEkmConnectionRequest + 5, // 15: google.cloud.kms.v1.EkmService.UpdateEkmConnection:input_type -> google.cloud.kms.v1.UpdateEkmConnectionRequest + 6, // 16: google.cloud.kms.v1.EkmService.GetEkmConfig:input_type -> google.cloud.kms.v1.GetEkmConfigRequest + 7, // 17: google.cloud.kms.v1.EkmService.UpdateEkmConfig:input_type -> google.cloud.kms.v1.UpdateEkmConfigRequest + 11, // 18: google.cloud.kms.v1.EkmService.VerifyConnectivity:input_type -> google.cloud.kms.v1.VerifyConnectivityRequest + 2, // 19: google.cloud.kms.v1.EkmService.ListEkmConnections:output_type -> google.cloud.kms.v1.ListEkmConnectionsResponse + 9, // 20: google.cloud.kms.v1.EkmService.GetEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 9, // 21: google.cloud.kms.v1.EkmService.CreateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 9, // 22: google.cloud.kms.v1.EkmService.UpdateEkmConnection:output_type -> google.cloud.kms.v1.EkmConnection + 10, // 23: google.cloud.kms.v1.EkmService.GetEkmConfig:output_type -> google.cloud.kms.v1.EkmConfig + 10, // 24: google.cloud.kms.v1.EkmService.UpdateEkmConfig:output_type -> google.cloud.kms.v1.EkmConfig + 12, // 25: google.cloud.kms.v1.EkmService.VerifyConnectivity:output_type -> google.cloud.kms.v1.VerifyConnectivityResponse + 19, // [19:26] is the sub-list for method output_type + 12, // [12:19] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_ekm_service_proto_init() } +func file_google_cloud_kms_v1_ekm_service_proto_init() { + if File_google_cloud_kms_v1_ekm_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListEkmConnectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListEkmConnectionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CreateEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEkmConnectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*GetEkmConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UpdateEkmConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*EkmConnection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*EkmConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*VerifyConnectivityRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*VerifyConnectivityResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*EkmConnection_ServiceResolver); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_ekm_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_ekm_service_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_ekm_service_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_ekm_service_proto_enumTypes, + MessageInfos: file_google_cloud_kms_v1_ekm_service_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_ekm_service_proto = out.File + file_google_cloud_kms_v1_ekm_service_proto_rawDesc = nil + file_google_cloud_kms_v1_ekm_service_proto_goTypes = nil + file_google_cloud_kms_v1_ekm_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// EkmServiceClient is the client API for EkmService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EkmServiceClient interface { + // Lists [EkmConnections][google.cloud.kms.v1.EkmConnection]. + ListEkmConnections(ctx context.Context, in *ListEkmConnectionsRequest, opts ...grpc.CallOption) (*ListEkmConnectionsResponse, error) + // Returns metadata for a given + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. 
+ GetEkmConnection(ctx context.Context, in *GetEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Creates a new [EkmConnection][google.cloud.kms.v1.EkmConnection] in a given + // Project and Location. + CreateEkmConnection(ctx context.Context, in *CreateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. + UpdateEkmConnection(ctx context.Context, in *UpdateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) + // Returns the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + GetEkmConfig(ctx context.Context, in *GetEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) + // Updates the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + UpdateEkmConfig(ctx context.Context, in *UpdateEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) + // Verifies that Cloud KMS can successfully connect to the external key + // manager specified by an [EkmConnection][google.cloud.kms.v1.EkmConnection]. + // If there is an error connecting to the EKM, this method returns a + // FAILED_PRECONDITION status containing structured information as described + // at https://cloud.google.com/kms/docs/reference/ekm_errors. + VerifyConnectivity(ctx context.Context, in *VerifyConnectivityRequest, opts ...grpc.CallOption) (*VerifyConnectivityResponse, error) +} + +type ekmServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewEkmServiceClient(cc grpc.ClientConnInterface) EkmServiceClient { + return &ekmServiceClient{cc} +} + +func (c *ekmServiceClient) ListEkmConnections(ctx context.Context, in *ListEkmConnectionsRequest, opts ...grpc.CallOption) (*ListEkmConnectionsResponse, error) { + out := new(ListEkmConnectionsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/ListEkmConnections", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) GetEkmConnection(ctx context.Context, in *GetEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/GetEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) CreateEkmConnection(ctx context.Context, in *CreateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/CreateEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) UpdateEkmConnection(ctx context.Context, in *UpdateEkmConnectionRequest, opts ...grpc.CallOption) (*EkmConnection, error) { + out := new(EkmConnection) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/UpdateEkmConnection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) GetEkmConfig(ctx context.Context, in *GetEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) { + out := new(EkmConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/GetEkmConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) UpdateEkmConfig(ctx context.Context, in *UpdateEkmConfigRequest, opts ...grpc.CallOption) (*EkmConfig, error) { + out := new(EkmConfig) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/UpdateEkmConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ekmServiceClient) VerifyConnectivity(ctx context.Context, in *VerifyConnectivityRequest, opts ...grpc.CallOption) (*VerifyConnectivityResponse, error) { + out := new(VerifyConnectivityResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.EkmService/VerifyConnectivity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EkmServiceServer is the server API for EkmService service. +type EkmServiceServer interface { + // Lists [EkmConnections][google.cloud.kms.v1.EkmConnection]. + ListEkmConnections(context.Context, *ListEkmConnectionsRequest) (*ListEkmConnectionsResponse, error) + // Returns metadata for a given + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + GetEkmConnection(context.Context, *GetEkmConnectionRequest) (*EkmConnection, error) + // Creates a new [EkmConnection][google.cloud.kms.v1.EkmConnection] in a given + // Project and Location. + CreateEkmConnection(context.Context, *CreateEkmConnectionRequest) (*EkmConnection, error) + // Updates an [EkmConnection][google.cloud.kms.v1.EkmConnection]'s metadata. + UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) + // Returns the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + GetEkmConfig(context.Context, *GetEkmConfigRequest) (*EkmConfig, error) + // Updates the [EkmConfig][google.cloud.kms.v1.EkmConfig] singleton resource + // for a given project and location. + UpdateEkmConfig(context.Context, *UpdateEkmConfigRequest) (*EkmConfig, error) + // Verifies that Cloud KMS can successfully connect to the external key + // manager specified by an [EkmConnection][google.cloud.kms.v1.EkmConnection]. + // If there is an error connecting to the EKM, this method returns a + // FAILED_PRECONDITION status containing structured information as described + // at https://cloud.google.com/kms/docs/reference/ekm_errors. + VerifyConnectivity(context.Context, *VerifyConnectivityRequest) (*VerifyConnectivityResponse, error) +} + +// UnimplementedEkmServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedEkmServiceServer struct { +} + +func (*UnimplementedEkmServiceServer) ListEkmConnections(context.Context, *ListEkmConnectionsRequest) (*ListEkmConnectionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEkmConnections not implemented") +} +func (*UnimplementedEkmServiceServer) GetEkmConnection(context.Context, *GetEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEkmConnection not implemented") +} +func (*UnimplementedEkmServiceServer) CreateEkmConnection(context.Context, *CreateEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEkmConnection not implemented") +} +func (*UnimplementedEkmServiceServer) UpdateEkmConnection(context.Context, *UpdateEkmConnectionRequest) (*EkmConnection, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEkmConnection not implemented") +} +func (*UnimplementedEkmServiceServer) GetEkmConfig(context.Context, *GetEkmConfigRequest) (*EkmConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEkmConfig not implemented") +} +func (*UnimplementedEkmServiceServer) UpdateEkmConfig(context.Context, *UpdateEkmConfigRequest) (*EkmConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEkmConfig not implemented") +} +func (*UnimplementedEkmServiceServer) VerifyConnectivity(context.Context, *VerifyConnectivityRequest) (*VerifyConnectivityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyConnectivity not implemented") +} + +func RegisterEkmServiceServer(s *grpc.Server, srv EkmServiceServer) { + s.RegisterService(&_EkmService_serviceDesc, srv) +} + +func _EkmService_ListEkmConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEkmConnectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).ListEkmConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/ListEkmConnections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).ListEkmConnections(ctx, req.(*ListEkmConnectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_GetEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).GetEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/GetEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).GetEkmConnection(ctx, req.(*GetEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_CreateEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).CreateEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/google.cloud.kms.v1.EkmService/CreateEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).CreateEkmConnection(ctx, req.(*CreateEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_UpdateEkmConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateEkmConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).UpdateEkmConnection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/UpdateEkmConnection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).UpdateEkmConnection(ctx, req.(*UpdateEkmConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_GetEkmConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEkmConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).GetEkmConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/GetEkmConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).GetEkmConfig(ctx, req.(*GetEkmConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_UpdateEkmConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateEkmConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).UpdateEkmConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/UpdateEkmConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).UpdateEkmConfig(ctx, req.(*UpdateEkmConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _EkmService_VerifyConnectivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyConnectivityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EkmServiceServer).VerifyConnectivity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.EkmService/VerifyConnectivity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EkmServiceServer).VerifyConnectivity(ctx, req.(*VerifyConnectivityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _EkmService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.EkmService", + HandlerType: (*EkmServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListEkmConnections", + Handler: _EkmService_ListEkmConnections_Handler, + }, + { + MethodName: "GetEkmConnection", + Handler: _EkmService_GetEkmConnection_Handler, + }, + { + MethodName: "CreateEkmConnection", + Handler: _EkmService_CreateEkmConnection_Handler, + }, + { + MethodName: 
"UpdateEkmConnection", + Handler: _EkmService_UpdateEkmConnection_Handler, + }, + { + MethodName: "GetEkmConfig", + Handler: _EkmService_GetEkmConfig_Handler, + }, + { + MethodName: "UpdateEkmConfig", + Handler: _EkmService_UpdateEkmConfig_Handler, + }, + { + MethodName: "VerifyConnectivity", + Handler: _EkmService_VerifyConnectivity_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/ekm_service.proto", +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go new file mode 100644 index 000000000..74d2c1f46 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -0,0 +1,2902 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/cloud/kms/v1/resources.proto + +package kmspb + +import ( + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] specifies how +// cryptographic operations are performed. For more information, see [Protection +// levels] (https://cloud.google.com/kms/docs/algorithms#protection_levels). +type ProtectionLevel int32 + +const ( + // Not specified. + ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED ProtectionLevel = 0 + // Crypto operations are performed in software. + ProtectionLevel_SOFTWARE ProtectionLevel = 1 + // Crypto operations are performed in a Hardware Security Module. + ProtectionLevel_HSM ProtectionLevel = 2 + // Crypto operations are performed by an external key manager. + ProtectionLevel_EXTERNAL ProtectionLevel = 3 + // Crypto operations are performed in an EKM-over-VPC backend. + ProtectionLevel_EXTERNAL_VPC ProtectionLevel = 4 +) + +// Enum value maps for ProtectionLevel. 
+var ( + ProtectionLevel_name = map[int32]string{ + 0: "PROTECTION_LEVEL_UNSPECIFIED", + 1: "SOFTWARE", + 2: "HSM", + 3: "EXTERNAL", + 4: "EXTERNAL_VPC", + } + ProtectionLevel_value = map[string]int32{ + "PROTECTION_LEVEL_UNSPECIFIED": 0, + "SOFTWARE": 1, + "HSM": 2, + "EXTERNAL": 3, + "EXTERNAL_VPC": 4, + } +) + +func (x ProtectionLevel) Enum() *ProtectionLevel { + p := new(ProtectionLevel) + *p = x + return p +} + +func (x ProtectionLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ProtectionLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[0].Descriptor() +} + +func (ProtectionLevel) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[0] +} + +func (x ProtectionLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ProtectionLevel.Descriptor instead. +func (ProtectionLevel) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{0} +} + +// Describes the reason for a data access. Please refer to +// https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes +// for the detailed semantic meaning of justification reason codes. +type AccessReason int32 + +const ( + // Unspecified access reason. + AccessReason_REASON_UNSPECIFIED AccessReason = 0 + // Customer-initiated support. + AccessReason_CUSTOMER_INITIATED_SUPPORT AccessReason = 1 + // Google-initiated access for system management and troubleshooting. + AccessReason_GOOGLE_INITIATED_SERVICE AccessReason = 2 + // Google-initiated access in response to a legal request or legal process. + AccessReason_THIRD_PARTY_DATA_REQUEST AccessReason = 3 + // Google-initiated access for security, fraud, abuse, or compliance purposes. + AccessReason_GOOGLE_INITIATED_REVIEW AccessReason = 4 + // Customer uses their account to perform any access to their own data which + // their IAM policy authorizes. + AccessReason_CUSTOMER_INITIATED_ACCESS AccessReason = 5 + // Google systems access customer data to help optimize the structure of the + // data or quality for future uses by the customer. + AccessReason_GOOGLE_INITIATED_SYSTEM_OPERATION AccessReason = 6 + // No reason is expected for this key request. + AccessReason_REASON_NOT_EXPECTED AccessReason = 7 + // Customer uses their account to perform any access to their own data which + // their IAM policy authorizes, and one of the following is true: + // + // - A Google administrator has reset the root-access account associated with + // the user's organization within the past 7 days. + // - A Google-initiated emergency access operation has interacted with a + // resource in the same project or folder as the currently accessed resource + // within the past 7 days. + AccessReason_MODIFIED_CUSTOMER_INITIATED_ACCESS AccessReason = 8 + // Google systems access customer data to help optimize the structure of the + // data or quality for future uses by the customer, and one of the following + // is true: + // + // - A Google administrator has reset the root-access account associated with + // the user's organization within the past 7 days. + // - A Google-initiated emergency access operation has interacted with a + // resource in the same project or folder as the currently accessed resource + // within the past 7 days. 
+ AccessReason_MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION AccessReason = 9 + // Google-initiated access to maintain system reliability. + AccessReason_GOOGLE_RESPONSE_TO_PRODUCTION_ALERT AccessReason = 10 + // One of the following operations is being executed while simultaneously + // encountering an internal technical issue which prevented a more precise + // justification code from being generated: + // + // - Your account has been used to perform any access to your own data which + // your IAM policy authorizes. + // - An automated Google system operates on encrypted customer data which your + // IAM policy authorizes. + // - Customer-initiated Google support access. + // - Google-initiated support access to protect system reliability. + AccessReason_CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING AccessReason = 11 +) + +// Enum value maps for AccessReason. +var ( + AccessReason_name = map[int32]string{ + 0: "REASON_UNSPECIFIED", + 1: "CUSTOMER_INITIATED_SUPPORT", + 2: "GOOGLE_INITIATED_SERVICE", + 3: "THIRD_PARTY_DATA_REQUEST", + 4: "GOOGLE_INITIATED_REVIEW", + 5: "CUSTOMER_INITIATED_ACCESS", + 6: "GOOGLE_INITIATED_SYSTEM_OPERATION", + 7: "REASON_NOT_EXPECTED", + 8: "MODIFIED_CUSTOMER_INITIATED_ACCESS", + 9: "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION", + 10: "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT", + 11: "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING", + } + AccessReason_value = map[string]int32{ + "REASON_UNSPECIFIED": 0, + "CUSTOMER_INITIATED_SUPPORT": 1, + "GOOGLE_INITIATED_SERVICE": 2, + "THIRD_PARTY_DATA_REQUEST": 3, + "GOOGLE_INITIATED_REVIEW": 4, + "CUSTOMER_INITIATED_ACCESS": 5, + "GOOGLE_INITIATED_SYSTEM_OPERATION": 6, + "REASON_NOT_EXPECTED": 7, + "MODIFIED_CUSTOMER_INITIATED_ACCESS": 8, + "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION": 9, + "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT": 10, + "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING": 11, + } +) + +func (x AccessReason) Enum() *AccessReason { + p := new(AccessReason) + *p = x + return p +} + +func (x AccessReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AccessReason) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[1].Descriptor() +} + +func (AccessReason) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[1] +} + +func (x AccessReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AccessReason.Descriptor instead. +func (AccessReason) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1} +} + +// [CryptoKeyPurpose][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose] +// describes the cryptographic capabilities of a +// [CryptoKey][google.cloud.kms.v1.CryptoKey]. A given key can only be used +// for the operations allowed by its purpose. For more information, see [Key +// purposes](https://cloud.google.com/kms/docs/algorithms#key_purposes). +type CryptoKey_CryptoKeyPurpose int32 + +const ( + // Not specified. + CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED CryptoKey_CryptoKeyPurpose = 0 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] and + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. 
+ CryptoKey_ENCRYPT_DECRYPT CryptoKey_CryptoKeyPurpose = 1 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with + // [AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign] + // and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_SIGN CryptoKey_CryptoKeyPurpose = 5 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with + // [AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt] + // and + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + CryptoKey_ASYMMETRIC_DECRYPT CryptoKey_CryptoKeyPurpose = 6 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with [RawEncrypt][google.cloud.kms.v1.KeyManagementService.RawEncrypt] + // and [RawDecrypt][google.cloud.kms.v1.KeyManagementService.RawDecrypt]. + // This purpose is meant to be used for interoperable symmetric + // encryption and does not support automatic CryptoKey rotation. + CryptoKey_RAW_ENCRYPT_DECRYPT CryptoKey_CryptoKeyPurpose = 7 + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] with this purpose may be used + // with [MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. + CryptoKey_MAC CryptoKey_CryptoKeyPurpose = 9 +) + +// Enum value maps for CryptoKey_CryptoKeyPurpose. +var ( + CryptoKey_CryptoKeyPurpose_name = map[int32]string{ + 0: "CRYPTO_KEY_PURPOSE_UNSPECIFIED", + 1: "ENCRYPT_DECRYPT", + 5: "ASYMMETRIC_SIGN", + 6: "ASYMMETRIC_DECRYPT", + 7: "RAW_ENCRYPT_DECRYPT", + 9: "MAC", + } + CryptoKey_CryptoKeyPurpose_value = map[string]int32{ + "CRYPTO_KEY_PURPOSE_UNSPECIFIED": 0, + "ENCRYPT_DECRYPT": 1, + "ASYMMETRIC_SIGN": 5, + "ASYMMETRIC_DECRYPT": 6, + "RAW_ENCRYPT_DECRYPT": 7, + "MAC": 9, + } +) + +func (x CryptoKey_CryptoKeyPurpose) Enum() *CryptoKey_CryptoKeyPurpose { + p := new(CryptoKey_CryptoKeyPurpose) + *p = x + return p +} + +func (x CryptoKey_CryptoKeyPurpose) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKey_CryptoKeyPurpose) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[2].Descriptor() +} + +func (CryptoKey_CryptoKeyPurpose) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[2] +} + +func (x CryptoKey_CryptoKeyPurpose) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKey_CryptoKeyPurpose.Descriptor instead. +func (CryptoKey_CryptoKeyPurpose) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1, 0} +} + +// Attestation formats provided by the HSM. +type KeyOperationAttestation_AttestationFormat int32 + +const ( + // Not specified. + KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED KeyOperationAttestation_AttestationFormat = 0 + // Cavium HSM attestation compressed with gzip. Note that this format is + // defined by Cavium and subject to change at any time. + // + // See + // https://www.marvell.com/products/security-solutions/nitrox-hs-adapters/software-key-attestation.html. + KeyOperationAttestation_CAVIUM_V1_COMPRESSED KeyOperationAttestation_AttestationFormat = 3 + // Cavium HSM attestation V2 compressed with gzip. This is a new format + // introduced in Cavium's version 3.2-08. 
+ KeyOperationAttestation_CAVIUM_V2_COMPRESSED KeyOperationAttestation_AttestationFormat = 4 +) + +// Enum value maps for KeyOperationAttestation_AttestationFormat. +var ( + KeyOperationAttestation_AttestationFormat_name = map[int32]string{ + 0: "ATTESTATION_FORMAT_UNSPECIFIED", + 3: "CAVIUM_V1_COMPRESSED", + 4: "CAVIUM_V2_COMPRESSED", + } + KeyOperationAttestation_AttestationFormat_value = map[string]int32{ + "ATTESTATION_FORMAT_UNSPECIFIED": 0, + "CAVIUM_V1_COMPRESSED": 3, + "CAVIUM_V2_COMPRESSED": 4, + } +) + +func (x KeyOperationAttestation_AttestationFormat) Enum() *KeyOperationAttestation_AttestationFormat { + p := new(KeyOperationAttestation_AttestationFormat) + *p = x + return p +} + +func (x KeyOperationAttestation_AttestationFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyOperationAttestation_AttestationFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[3].Descriptor() +} + +func (KeyOperationAttestation_AttestationFormat) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[3] +} + +func (x KeyOperationAttestation_AttestationFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use KeyOperationAttestation_AttestationFormat.Descriptor instead. +func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3, 0} +} + +// The algorithm of the +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], indicating what +// parameters must be used for each cryptographic operation. +// +// The +// [GOOGLE_SYMMETRIC_ENCRYPTION][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.GOOGLE_SYMMETRIC_ENCRYPTION] +// algorithm is usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. +// +// Algorithms beginning with `RSA_SIGN_` are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after `RSA_SIGN_` correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// For PSS, the salt length used is equal to the length of digest +// algorithm. For example, +// [RSA_SIGN_PSS_2048_SHA256][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm.RSA_SIGN_PSS_2048_SHA256] +// will use PSS with a salt length of 256 bits or 32 bytes. +// +// Algorithms beginning with `RSA_DECRYPT_` are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. +// +// The fields in the name after `RSA_DECRYPT_` correspond to the following +// parameters: padding algorithm, modulus bit length, and digest algorithm. +// +// Algorithms beginning with `EC_SIGN_` are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]. +// +// The fields in the name after `EC_SIGN_` correspond to the following +// parameters: elliptic curve, digest algorithm. 
+// +// Algorithms beginning with `HMAC_` are usable with +// [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] +// [MAC][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.MAC]. +// +// The suffix following `HMAC_` corresponds to the hash algorithm being used +// (eg. SHA256). +// +// For more information, see [Key purposes and algorithms] +// (https://cloud.google.com/kms/docs/algorithms). +type CryptoKeyVersion_CryptoKeyVersionAlgorithm int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionAlgorithm = 0 + // Creates symmetric encryption keys. + CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 1 + // AES-GCM (Galois Counter Mode) using 128-bit keys. + CryptoKeyVersion_AES_128_GCM CryptoKeyVersion_CryptoKeyVersionAlgorithm = 41 + // AES-GCM (Galois Counter Mode) using 256-bit keys. + CryptoKeyVersion_AES_256_GCM CryptoKeyVersion_CryptoKeyVersionAlgorithm = 19 + // AES-CBC (Cipher Block Chaining Mode) using 128-bit keys. + CryptoKeyVersion_AES_128_CBC CryptoKeyVersion_CryptoKeyVersionAlgorithm = 42 + // AES-CBC (Cipher Block Chaining Mode) using 256-bit keys. + CryptoKeyVersion_AES_256_CBC CryptoKeyVersion_CryptoKeyVersionAlgorithm = 43 + // AES-CTR (Counter Mode) using 128-bit keys. + CryptoKeyVersion_AES_128_CTR CryptoKeyVersion_CryptoKeyVersionAlgorithm = 44 + // AES-CTR (Counter Mode) using 256-bit keys. + CryptoKeyVersion_AES_256_CTR CryptoKeyVersion_CryptoKeyVersionAlgorithm = 45 + // RSASSA-PSS 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 2 + // RSASSA-PSS 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 3 + // RSASSA-PSS 4096 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 4 + // RSASSA-PSS 4096 bit key with a SHA512 digest. + CryptoKeyVersion_RSA_SIGN_PSS_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 15 + // RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 5 + // RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 6 + // RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 7 + // RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest. + CryptoKeyVersion_RSA_SIGN_PKCS1_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 16 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 2048 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_2048 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 28 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 3072 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_3072 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 29 + // RSASSA-PKCS1-v1_5 signing without encoding, with a 4096 bit key. + CryptoKeyVersion_RSA_SIGN_RAW_PKCS1_4096 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 30 + // RSAES-OAEP 2048 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 8 + // RSAES-OAEP 3072 bit key with a SHA256 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 9 + // RSAES-OAEP 4096 bit key with a SHA256 digest. 
+ CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 10 + // RSAES-OAEP 4096 bit key with a SHA512 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 17 + // RSAES-OAEP 2048 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_2048_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 37 + // RSAES-OAEP 3072 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_3072_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 38 + // RSAES-OAEP 4096 bit key with a SHA1 digest. + CryptoKeyVersion_RSA_DECRYPT_OAEP_4096_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 39 + // ECDSA on the NIST P-256 curve with a SHA256 digest. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms + CryptoKeyVersion_EC_SIGN_P256_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 12 + // ECDSA on the NIST P-384 curve with a SHA384 digest. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms + CryptoKeyVersion_EC_SIGN_P384_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 13 + // ECDSA on the non-NIST secp256k1 curve. This curve is only supported for + // HSM protection level. + // Other hash functions can also be used: + // https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms + CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 31 + // EdDSA on the Curve25519 in pure mode (taking data as input). + CryptoKeyVersion_EC_SIGN_ED25519 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 40 + // HMAC-SHA256 signing with a 256 bit key. + CryptoKeyVersion_HMAC_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 32 + // HMAC-SHA1 signing with a 160 bit key. + CryptoKeyVersion_HMAC_SHA1 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 33 + // HMAC-SHA384 signing with a 384 bit key. + CryptoKeyVersion_HMAC_SHA384 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 34 + // HMAC-SHA512 signing with a 512 bit key. + CryptoKeyVersion_HMAC_SHA512 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 35 + // HMAC-SHA224 signing with a 224 bit key. + CryptoKeyVersion_HMAC_SHA224 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 36 + // Algorithm representing symmetric encryption by an external key manager. + CryptoKeyVersion_EXTERNAL_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 18 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionAlgorithm. 
+var ( + CryptoKeyVersion_CryptoKeyVersionAlgorithm_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", + 1: "GOOGLE_SYMMETRIC_ENCRYPTION", + 41: "AES_128_GCM", + 19: "AES_256_GCM", + 42: "AES_128_CBC", + 43: "AES_256_CBC", + 44: "AES_128_CTR", + 45: "AES_256_CTR", + 2: "RSA_SIGN_PSS_2048_SHA256", + 3: "RSA_SIGN_PSS_3072_SHA256", + 4: "RSA_SIGN_PSS_4096_SHA256", + 15: "RSA_SIGN_PSS_4096_SHA512", + 5: "RSA_SIGN_PKCS1_2048_SHA256", + 6: "RSA_SIGN_PKCS1_3072_SHA256", + 7: "RSA_SIGN_PKCS1_4096_SHA256", + 16: "RSA_SIGN_PKCS1_4096_SHA512", + 28: "RSA_SIGN_RAW_PKCS1_2048", + 29: "RSA_SIGN_RAW_PKCS1_3072", + 30: "RSA_SIGN_RAW_PKCS1_4096", + 8: "RSA_DECRYPT_OAEP_2048_SHA256", + 9: "RSA_DECRYPT_OAEP_3072_SHA256", + 10: "RSA_DECRYPT_OAEP_4096_SHA256", + 17: "RSA_DECRYPT_OAEP_4096_SHA512", + 37: "RSA_DECRYPT_OAEP_2048_SHA1", + 38: "RSA_DECRYPT_OAEP_3072_SHA1", + 39: "RSA_DECRYPT_OAEP_4096_SHA1", + 12: "EC_SIGN_P256_SHA256", + 13: "EC_SIGN_P384_SHA384", + 31: "EC_SIGN_SECP256K1_SHA256", + 40: "EC_SIGN_ED25519", + 32: "HMAC_SHA256", + 33: "HMAC_SHA1", + 34: "HMAC_SHA384", + 35: "HMAC_SHA512", + 36: "HMAC_SHA224", + 18: "EXTERNAL_SYMMETRIC_ENCRYPTION", + } + CryptoKeyVersion_CryptoKeyVersionAlgorithm_value = map[string]int32{ + "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED": 0, + "GOOGLE_SYMMETRIC_ENCRYPTION": 1, + "AES_128_GCM": 41, + "AES_256_GCM": 19, + "AES_128_CBC": 42, + "AES_256_CBC": 43, + "AES_128_CTR": 44, + "AES_256_CTR": 45, + "RSA_SIGN_PSS_2048_SHA256": 2, + "RSA_SIGN_PSS_3072_SHA256": 3, + "RSA_SIGN_PSS_4096_SHA256": 4, + "RSA_SIGN_PSS_4096_SHA512": 15, + "RSA_SIGN_PKCS1_2048_SHA256": 5, + "RSA_SIGN_PKCS1_3072_SHA256": 6, + "RSA_SIGN_PKCS1_4096_SHA256": 7, + "RSA_SIGN_PKCS1_4096_SHA512": 16, + "RSA_SIGN_RAW_PKCS1_2048": 28, + "RSA_SIGN_RAW_PKCS1_3072": 29, + "RSA_SIGN_RAW_PKCS1_4096": 30, + "RSA_DECRYPT_OAEP_2048_SHA256": 8, + "RSA_DECRYPT_OAEP_3072_SHA256": 9, + "RSA_DECRYPT_OAEP_4096_SHA256": 10, + "RSA_DECRYPT_OAEP_4096_SHA512": 17, + "RSA_DECRYPT_OAEP_2048_SHA1": 37, + "RSA_DECRYPT_OAEP_3072_SHA1": 38, + "RSA_DECRYPT_OAEP_4096_SHA1": 39, + "EC_SIGN_P256_SHA256": 12, + "EC_SIGN_P384_SHA384": 13, + "EC_SIGN_SECP256K1_SHA256": 31, + "EC_SIGN_ED25519": 40, + "HMAC_SHA256": 32, + "HMAC_SHA1": 33, + "HMAC_SHA384": 34, + "HMAC_SHA512": 35, + "HMAC_SHA224": 36, + "EXTERNAL_SYMMETRIC_ENCRYPTION": 18, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) Enum() *CryptoKeyVersion_CryptoKeyVersionAlgorithm { + p := new(CryptoKeyVersion_CryptoKeyVersionAlgorithm) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[4].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[4] +} + +func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionAlgorithm.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 0} +} + +// The state of a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], +// indicating if it can be used. 
+type CryptoKeyVersion_CryptoKeyVersionState int32 + +const ( + // Not specified. + CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionState = 0 + // This version is still being generated. It may not be used, enabled, + // disabled, or destroyed yet. Cloud KMS will automatically mark this + // version + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // as soon as the version is ready. + CryptoKeyVersion_PENDING_GENERATION CryptoKeyVersion_CryptoKeyVersionState = 5 + // This version may be used for cryptographic operations. + CryptoKeyVersion_ENABLED CryptoKeyVersion_CryptoKeyVersionState = 1 + // This version may not be used, but the key material is still available, + // and the version can be placed back into the + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // state. + CryptoKeyVersion_DISABLED CryptoKeyVersion_CryptoKeyVersionState = 2 + // This version is destroyed, and the key material is no longer stored. + // This version may only become + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // again if this version is + // [reimport_eligible][google.cloud.kms.v1.CryptoKeyVersion.reimport_eligible] + // and the original key material is reimported with a call to + // [KeyManagementService.ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. + CryptoKeyVersion_DESTROYED CryptoKeyVersion_CryptoKeyVersionState = 3 + // This version is scheduled for destruction, and will be destroyed soon. + // Call + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to put it back into the + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // state. + CryptoKeyVersion_DESTROY_SCHEDULED CryptoKeyVersion_CryptoKeyVersionState = 4 + // This version is still being imported. It may not be used, enabled, + // disabled, or destroyed yet. Cloud KMS will automatically mark this + // version + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // as soon as the version is ready. + CryptoKeyVersion_PENDING_IMPORT CryptoKeyVersion_CryptoKeyVersionState = 6 + // This version was not imported successfully. It may not be used, enabled, + // disabled, or destroyed. The submitted key material has been discarded. + // Additional details can be found in + // [CryptoKeyVersion.import_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.import_failure_reason]. + CryptoKeyVersion_IMPORT_FAILED CryptoKeyVersion_CryptoKeyVersionState = 7 + // This version was not generated successfully. It may not be used, enabled, + // disabled, or destroyed. Additional details can be found in + // [CryptoKeyVersion.generation_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.generation_failure_reason]. + CryptoKeyVersion_GENERATION_FAILED CryptoKeyVersion_CryptoKeyVersionState = 8 + // This version was destroyed, and it may not be used or enabled again. + // Cloud KMS is waiting for the corresponding key material residing in an + // external key manager to be destroyed. + CryptoKeyVersion_PENDING_EXTERNAL_DESTRUCTION CryptoKeyVersion_CryptoKeyVersionState = 9 + // This version was destroyed, and it may not be used or enabled again. + // However, Cloud KMS could not confirm that the corresponding key material + // residing in an external key manager was destroyed. 
Additional details can + // be found in + // [CryptoKeyVersion.external_destruction_failure_reason][google.cloud.kms.v1.CryptoKeyVersion.external_destruction_failure_reason]. + CryptoKeyVersion_EXTERNAL_DESTRUCTION_FAILED CryptoKeyVersion_CryptoKeyVersionState = 10 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionState. +var ( + CryptoKeyVersion_CryptoKeyVersionState_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", + 5: "PENDING_GENERATION", + 1: "ENABLED", + 2: "DISABLED", + 3: "DESTROYED", + 4: "DESTROY_SCHEDULED", + 6: "PENDING_IMPORT", + 7: "IMPORT_FAILED", + 8: "GENERATION_FAILED", + 9: "PENDING_EXTERNAL_DESTRUCTION", + 10: "EXTERNAL_DESTRUCTION_FAILED", + } + CryptoKeyVersion_CryptoKeyVersionState_value = map[string]int32{ + "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED": 0, + "PENDING_GENERATION": 5, + "ENABLED": 1, + "DISABLED": 2, + "DESTROYED": 3, + "DESTROY_SCHEDULED": 4, + "PENDING_IMPORT": 6, + "IMPORT_FAILED": 7, + "GENERATION_FAILED": 8, + "PENDING_EXTERNAL_DESTRUCTION": 9, + "EXTERNAL_DESTRUCTION_FAILED": 10, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionState) Enum() *CryptoKeyVersion_CryptoKeyVersionState { + p := new(CryptoKeyVersion_CryptoKeyVersionState) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionState) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[5].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionState) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[5] +} + +func (x CryptoKeyVersion_CryptoKeyVersionState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionState.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionState) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 1} +} + +// A view for [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]s. +// Controls the level of detail returned for +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] in +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions] +// and +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type CryptoKeyVersion_CryptoKeyVersionView int32 + +const ( + // Default view for each + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Does not + // include the + // [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation] field. + CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED CryptoKeyVersion_CryptoKeyVersionView = 0 + // Provides all fields in each + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], including the + // [attestation][google.cloud.kms.v1.CryptoKeyVersion.attestation]. + CryptoKeyVersion_FULL CryptoKeyVersion_CryptoKeyVersionView = 1 +) + +// Enum value maps for CryptoKeyVersion_CryptoKeyVersionView. 
+var ( + CryptoKeyVersion_CryptoKeyVersionView_name = map[int32]string{ + 0: "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", + 1: "FULL", + } + CryptoKeyVersion_CryptoKeyVersionView_value = map[string]int32{ + "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED": 0, + "FULL": 1, + } +) + +func (x CryptoKeyVersion_CryptoKeyVersionView) Enum() *CryptoKeyVersion_CryptoKeyVersionView { + p := new(CryptoKeyVersion_CryptoKeyVersionView) + *p = x + return p +} + +func (x CryptoKeyVersion_CryptoKeyVersionView) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CryptoKeyVersion_CryptoKeyVersionView) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[6].Descriptor() +} + +func (CryptoKeyVersion_CryptoKeyVersionView) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[6] +} + +func (x CryptoKeyVersion_CryptoKeyVersionView) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CryptoKeyVersion_CryptoKeyVersionView.Descriptor instead. +func (CryptoKeyVersion_CryptoKeyVersionView) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 2} +} + +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] describes the +// key wrapping method chosen for this +// [ImportJob][google.cloud.kms.v1.ImportJob]. +type ImportJob_ImportMethod int32 + +const ( + // Not specified. + ImportJob_IMPORT_METHOD_UNSPECIFIED ImportJob_ImportMethod = 0 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 3072 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_3072_SHA1_AES_256 ImportJob_ImportMethod = 1 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 4096 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_4096_SHA1_AES_256 ImportJob_ImportMethod = 2 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 3072 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). + ImportJob_RSA_OAEP_3072_SHA256_AES_256 ImportJob_ImportMethod = 3 + // This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping + // scheme defined in the PKCS #11 standard. In summary, this involves + // wrapping the raw key with an ephemeral AES key, and wrapping the + // ephemeral AES key with a 4096 bit RSA key. For more details, see + // [RSA AES key wrap + // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). 
+ ImportJob_RSA_OAEP_4096_SHA256_AES_256 ImportJob_ImportMethod = 4 + // This ImportMethod represents RSAES-OAEP with a 3072 bit RSA key. The + // key material to be imported is wrapped directly with the RSA key. Due + // to technical limitations of RSA wrapping, this method cannot be used to + // wrap RSA keys for import. + ImportJob_RSA_OAEP_3072_SHA256 ImportJob_ImportMethod = 5 + // This ImportMethod represents RSAES-OAEP with a 4096 bit RSA key. The + // key material to be imported is wrapped directly with the RSA key. Due + // to technical limitations of RSA wrapping, this method cannot be used to + // wrap RSA keys for import. + ImportJob_RSA_OAEP_4096_SHA256 ImportJob_ImportMethod = 6 +) + +// Enum value maps for ImportJob_ImportMethod. +var ( + ImportJob_ImportMethod_name = map[int32]string{ + 0: "IMPORT_METHOD_UNSPECIFIED", + 1: "RSA_OAEP_3072_SHA1_AES_256", + 2: "RSA_OAEP_4096_SHA1_AES_256", + 3: "RSA_OAEP_3072_SHA256_AES_256", + 4: "RSA_OAEP_4096_SHA256_AES_256", + 5: "RSA_OAEP_3072_SHA256", + 6: "RSA_OAEP_4096_SHA256", + } + ImportJob_ImportMethod_value = map[string]int32{ + "IMPORT_METHOD_UNSPECIFIED": 0, + "RSA_OAEP_3072_SHA1_AES_256": 1, + "RSA_OAEP_4096_SHA1_AES_256": 2, + "RSA_OAEP_3072_SHA256_AES_256": 3, + "RSA_OAEP_4096_SHA256_AES_256": 4, + "RSA_OAEP_3072_SHA256": 5, + "RSA_OAEP_4096_SHA256": 6, + } +) + +func (x ImportJob_ImportMethod) Enum() *ImportJob_ImportMethod { + p := new(ImportJob_ImportMethod) + *p = x + return p +} + +func (x ImportJob_ImportMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ImportJob_ImportMethod) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[7].Descriptor() +} + +func (ImportJob_ImportMethod) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[7] +} + +func (x ImportJob_ImportMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ImportJob_ImportMethod.Descriptor instead. +func (ImportJob_ImportMethod) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0} +} + +// The state of the [ImportJob][google.cloud.kms.v1.ImportJob], indicating if +// it can be used. +type ImportJob_ImportJobState int32 + +const ( + // Not specified. + ImportJob_IMPORT_JOB_STATE_UNSPECIFIED ImportJob_ImportJobState = 0 + // The wrapping key for this job is still being generated. It may not be + // used. Cloud KMS will automatically mark this job as + // [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE] as soon as + // the wrapping key is generated. + ImportJob_PENDING_GENERATION ImportJob_ImportJobState = 1 + // This job may be used in + // [CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey] + // and + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // requests. + ImportJob_ACTIVE ImportJob_ImportJobState = 2 + // This job can no longer be used and may not leave this state once entered. + ImportJob_EXPIRED ImportJob_ImportJobState = 3 +) + +// Enum value maps for ImportJob_ImportJobState. 
+var ( + ImportJob_ImportJobState_name = map[int32]string{ + 0: "IMPORT_JOB_STATE_UNSPECIFIED", + 1: "PENDING_GENERATION", + 2: "ACTIVE", + 3: "EXPIRED", + } + ImportJob_ImportJobState_value = map[string]int32{ + "IMPORT_JOB_STATE_UNSPECIFIED": 0, + "PENDING_GENERATION": 1, + "ACTIVE": 2, + "EXPIRED": 3, + } +) + +func (x ImportJob_ImportJobState) Enum() *ImportJob_ImportJobState { + p := new(ImportJob_ImportJobState) + *p = x + return p +} + +func (x ImportJob_ImportJobState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ImportJob_ImportJobState) Descriptor() protoreflect.EnumDescriptor { + return file_google_cloud_kms_v1_resources_proto_enumTypes[8].Descriptor() +} + +func (ImportJob_ImportJobState) Type() protoreflect.EnumType { + return &file_google_cloud_kms_v1_resources_proto_enumTypes[8] +} + +func (x ImportJob_ImportJobState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ImportJob_ImportJobState.Descriptor instead. +func (ImportJob_ImportJobState) EnumDescriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 1} +} + +// A [KeyRing][google.cloud.kms.v1.KeyRing] is a toplevel logical grouping of +// [CryptoKeys][google.cloud.kms.v1.CryptoKey]. +type KeyRing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for the + // [KeyRing][google.cloud.kms.v1.KeyRing] in the format + // `projects/*/locations/*/keyRings/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The time at which this [KeyRing][google.cloud.kms.v1.KeyRing] + // was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` +} + +func (x *KeyRing) Reset() { + *x = KeyRing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyRing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyRing) ProtoMessage() {} + +func (x *KeyRing) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyRing.ProtoReflect.Descriptor instead. +func (*KeyRing) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{0} +} + +func (x *KeyRing) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeyRing) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] represents a logical key that +// can be used for cryptographic operations. +// +// A [CryptoKey][google.cloud.kms.v1.CryptoKey] is made up of zero or more +// [versions][google.cloud.kms.v1.CryptoKeyVersion], which represent the actual +// key material used in cryptographic operations. +type CryptoKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. 
The resource name for this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. A copy of the "primary" + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that will be used + // by [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] when this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] is given in + // [EncryptRequest.name][google.cloud.kms.v1.EncryptRequest.name]. + // + // The [CryptoKey][google.cloud.kms.v1.CryptoKey]'s primary version can be + // updated via + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // may have a primary. For other keys, this field will be omitted. + Primary *CryptoKeyVersion `protobuf:"bytes,2,opt,name=primary,proto3" json:"primary,omitempty"` + // Immutable. The immutable purpose of this + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + Purpose CryptoKey_CryptoKeyPurpose `protobuf:"varint,3,opt,name=purpose,proto3,enum=google.cloud.kms.v1.CryptoKey_CryptoKeyPurpose" json:"purpose,omitempty"` + // Output only. The time at which this + // [CryptoKey][google.cloud.kms.v1.CryptoKey] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // At [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time], + // the Key Management Service will automatically: + // + // 1. Create a new version of this [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // 2. Mark the new version as primary. + // + // Key rotations performed manually via + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // and + // [UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion] + // do not affect + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time]. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // support automatic rotation. For other keys, this field must be omitted. + NextRotationTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=next_rotation_time,json=nextRotationTime,proto3" json:"next_rotation_time,omitempty"` + // Controls the rate of automatic rotation. + // + // Types that are assignable to RotationSchedule: + // + // *CryptoKey_RotationPeriod + RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"` + // A template describing settings for new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] instances. The + // properties of new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // instances created by either + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // or auto-rotation are controlled by this template. + VersionTemplate *CryptoKeyVersionTemplate `protobuf:"bytes,11,opt,name=version_template,json=versionTemplate,proto3" json:"version_template,omitempty"` + // Labels with user-defined metadata. For more information, see + // [Labeling Keys](https://cloud.google.com/kms/docs/labeling-keys). 
+ Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Immutable. Whether this key may contain imported versions only. + ImportOnly bool `protobuf:"varint,13,opt,name=import_only,json=importOnly,proto3" json:"import_only,omitempty"` + // Immutable. The period of time that versions of this key spend in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state before transitioning to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. + // If not specified at creation time, the default duration is 30 days. + DestroyScheduledDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=destroy_scheduled_duration,json=destroyScheduledDuration,proto3" json:"destroy_scheduled_duration,omitempty"` + // Immutable. The resource name of the backend environment where the key + // material for all [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] + // associated with this [CryptoKey][google.cloud.kms.v1.CryptoKey] reside and + // where all related cryptographic operations are performed. Only applicable + // if [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] have a + // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of + // [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], with the + // resource name in the format `projects/*/locations/*/ekmConnections/*`. + // Note, this list is non-exhaustive and may apply to additional + // [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. + CryptoKeyBackend string `protobuf:"bytes,15,opt,name=crypto_key_backend,json=cryptoKeyBackend,proto3" json:"crypto_key_backend,omitempty"` + // Optional. The policy used for Key Access Justifications Policy Enforcement. + // If this field is present and this key is enrolled in Key Access + // Justifications Policy Enforcement, the policy will be evaluated in encrypt, + // decrypt, and sign operations, and the operation will fail if rejected by + // the policy. The policy is defined by specifying zero or more allowed + // justification codes. + // https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes + // By default, this field is absent, and all justification codes are allowed. + KeyAccessJustificationsPolicy *KeyAccessJustificationsPolicy `protobuf:"bytes,17,opt,name=key_access_justifications_policy,json=keyAccessJustificationsPolicy,proto3" json:"key_access_justifications_policy,omitempty"` +} + +func (x *CryptoKey) Reset() { + *x = CryptoKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKey) ProtoMessage() {} + +func (x *CryptoKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKey.ProtoReflect.Descriptor instead. 
+func (*CryptoKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1} +} + +func (x *CryptoKey) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CryptoKey) GetPrimary() *CryptoKeyVersion { + if x != nil { + return x.Primary + } + return nil +} + +func (x *CryptoKey) GetPurpose() CryptoKey_CryptoKeyPurpose { + if x != nil { + return x.Purpose + } + return CryptoKey_CRYPTO_KEY_PURPOSE_UNSPECIFIED +} + +func (x *CryptoKey) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *CryptoKey) GetNextRotationTime() *timestamppb.Timestamp { + if x != nil { + return x.NextRotationTime + } + return nil +} + +func (m *CryptoKey) GetRotationSchedule() isCryptoKey_RotationSchedule { + if m != nil { + return m.RotationSchedule + } + return nil +} + +func (x *CryptoKey) GetRotationPeriod() *durationpb.Duration { + if x, ok := x.GetRotationSchedule().(*CryptoKey_RotationPeriod); ok { + return x.RotationPeriod + } + return nil +} + +func (x *CryptoKey) GetVersionTemplate() *CryptoKeyVersionTemplate { + if x != nil { + return x.VersionTemplate + } + return nil +} + +func (x *CryptoKey) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *CryptoKey) GetImportOnly() bool { + if x != nil { + return x.ImportOnly + } + return false +} + +func (x *CryptoKey) GetDestroyScheduledDuration() *durationpb.Duration { + if x != nil { + return x.DestroyScheduledDuration + } + return nil +} + +func (x *CryptoKey) GetCryptoKeyBackend() string { + if x != nil { + return x.CryptoKeyBackend + } + return "" +} + +func (x *CryptoKey) GetKeyAccessJustificationsPolicy() *KeyAccessJustificationsPolicy { + if x != nil { + return x.KeyAccessJustificationsPolicy + } + return nil +} + +type isCryptoKey_RotationSchedule interface { + isCryptoKey_RotationSchedule() +} + +type CryptoKey_RotationPeriod struct { + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time] + // will be advanced by this period when the service automatically rotates a + // key. Must be at least 24 hours and at most 876,000 hours. + // + // If [rotation_period][google.cloud.kms.v1.CryptoKey.rotation_period] is + // set, + // [next_rotation_time][google.cloud.kms.v1.CryptoKey.next_rotation_time] + // must also be set. + // + // Keys with [purpose][google.cloud.kms.v1.CryptoKey.purpose] + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT] + // support automatic rotation. For other keys, this field must be omitted. + RotationPeriod *durationpb.Duration `protobuf:"bytes,8,opt,name=rotation_period,json=rotationPeriod,proto3,oneof"` +} + +func (*CryptoKey_RotationPeriod) isCryptoKey_RotationSchedule() {} + +// A [CryptoKeyVersionTemplate][google.cloud.kms.v1.CryptoKeyVersionTemplate] +// specifies the properties to use when creating a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], either manually +// with +// [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] +// or automatically as a result of auto-rotation. +type CryptoKeyVersionTemplate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] to use when creating + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on this + // template. Immutable. 
Defaults to + // [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE]. + ProtectionLevel ProtectionLevel `protobuf:"varint,1,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Required. + // [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // to use when creating a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] based on this + // template. + // + // For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both + // this field is omitted and + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] is + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` +} + +func (x *CryptoKeyVersionTemplate) Reset() { + *x = CryptoKeyVersionTemplate{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKeyVersionTemplate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKeyVersionTemplate) ProtoMessage() {} + +func (x *CryptoKeyVersionTemplate) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKeyVersionTemplate.ProtoReflect.Descriptor instead. +func (*CryptoKeyVersionTemplate) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{2} +} + +func (x *CryptoKeyVersionTemplate) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *CryptoKeyVersionTemplate) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +// Contains an HSM-generated attestation about a key operation. For more +// information, see [Verifying attestations] +// (https://cloud.google.com/kms/docs/attest-key). +type KeyOperationAttestation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The format of the attestation data. + Format KeyOperationAttestation_AttestationFormat `protobuf:"varint,4,opt,name=format,proto3,enum=google.cloud.kms.v1.KeyOperationAttestation_AttestationFormat" json:"format,omitempty"` + // Output only. The attestation data provided by the HSM when the key + // operation was performed. + Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"` + // Output only. 
The certificate chains needed to validate the attestation + CertChains *KeyOperationAttestation_CertificateChains `protobuf:"bytes,6,opt,name=cert_chains,json=certChains,proto3" json:"cert_chains,omitempty"` +} + +func (x *KeyOperationAttestation) Reset() { + *x = KeyOperationAttestation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyOperationAttestation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyOperationAttestation) ProtoMessage() {} + +func (x *KeyOperationAttestation) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyOperationAttestation.ProtoReflect.Descriptor instead. +func (*KeyOperationAttestation) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyOperationAttestation) GetFormat() KeyOperationAttestation_AttestationFormat { + if x != nil { + return x.Format + } + return KeyOperationAttestation_ATTESTATION_FORMAT_UNSPECIFIED +} + +func (x *KeyOperationAttestation) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *KeyOperationAttestation) GetCertChains() *KeyOperationAttestation_CertificateChains { + if x != nil { + return x.CertChains + } + return nil +} + +// A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents an +// individual cryptographic key, and the associated key material. +// +// An +// [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] +// version can be used for cryptographic operations. +// +// For security reasons, the raw cryptographic key material represented by a +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] can never be viewed +// or exported. It can only be used to encrypt, decrypt, or sign data when an +// authorized user or application invokes Cloud KMS. +type CryptoKeyVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The current state of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + State CryptoKeyVersion_CryptoKeyVersionState `protobuf:"varint,3,opt,name=state,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionState" json:"state,omitempty"` + // Output only. The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] + // describing how crypto operations are performed with this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + ProtectionLevel ProtectionLevel `protobuf:"varint,7,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Output only. The + // [CryptoKeyVersionAlgorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // that this [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // supports. 
+ Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,10,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Output only. Statement that was generated and signed by the HSM at key + // creation time. Use this statement to verify attributes of the key as stored + // on the HSM, independently of Google. Only provided for key versions with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersion.protection_level] + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM]. + Attestation *KeyOperationAttestation `protobuf:"bytes,8,opt,name=attestation,proto3" json:"attestation,omitempty"` + // Output only. The time at which this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The time this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material was + // generated. + GenerateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=generate_time,json=generateTime,proto3" json:"generate_time,omitempty"` + // Output only. The time this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material is + // scheduled for destruction. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]. + DestroyTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=destroy_time,json=destroyTime,proto3" json:"destroy_time,omitempty"` + // Output only. The time this CryptoKeyVersion's key material was + // destroyed. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]. + DestroyEventTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=destroy_event_time,json=destroyEventTime,proto3" json:"destroy_event_time,omitempty"` + // Output only. The name of the [ImportJob][google.cloud.kms.v1.ImportJob] + // used in the most recent import of this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Only present if + // the underlying key material was imported. + ImportJob string `protobuf:"bytes,14,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` + // Output only. The time at which this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s key material was + // most recently imported. + ImportTime *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=import_time,json=importTime,proto3" json:"import_time,omitempty"` + // Output only. The root cause of the most recent import failure. Only present + // if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED]. + ImportFailureReason string `protobuf:"bytes,16,opt,name=import_failure_reason,json=importFailureReason,proto3" json:"import_failure_reason,omitempty"` + // Output only. The root cause of the most recent generation failure. Only + // present if [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [GENERATION_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.GENERATION_FAILED]. + GenerationFailureReason string `protobuf:"bytes,19,opt,name=generation_failure_reason,json=generationFailureReason,proto3" json:"generation_failure_reason,omitempty"` + // Output only. 
The root cause of the most recent external destruction + // failure. Only present if + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] is + // [EXTERNAL_DESTRUCTION_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.EXTERNAL_DESTRUCTION_FAILED]. + ExternalDestructionFailureReason string `protobuf:"bytes,20,opt,name=external_destruction_failure_reason,json=externalDestructionFailureReason,proto3" json:"external_destruction_failure_reason,omitempty"` + // ExternalProtectionLevelOptions stores a group of additional fields for + // configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that + // are specific to the + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] protection level + // and [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] + // protection levels. + ExternalProtectionLevelOptions *ExternalProtectionLevelOptions `protobuf:"bytes,17,opt,name=external_protection_level_options,json=externalProtectionLevelOptions,proto3" json:"external_protection_level_options,omitempty"` + // Output only. Whether or not this key version is eligible for reimport, by + // being specified as a target in + // [ImportCryptoKeyVersionRequest.crypto_key_version][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.crypto_key_version]. + ReimportEligible bool `protobuf:"varint,18,opt,name=reimport_eligible,json=reimportEligible,proto3" json:"reimport_eligible,omitempty"` +} + +func (x *CryptoKeyVersion) Reset() { + *x = CryptoKeyVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CryptoKeyVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CryptoKeyVersion) ProtoMessage() {} + +func (x *CryptoKeyVersion) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CryptoKeyVersion.ProtoReflect.Descriptor instead. 
+func (*CryptoKeyVersion) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4} +} + +func (x *CryptoKeyVersion) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CryptoKeyVersion) GetState() CryptoKeyVersion_CryptoKeyVersionState { + if x != nil { + return x.State + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_STATE_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *CryptoKeyVersion) GetAttestation() *KeyOperationAttestation { + if x != nil { + return x.Attestation + } + return nil +} + +func (x *CryptoKeyVersion) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *CryptoKeyVersion) GetGenerateTime() *timestamppb.Timestamp { + if x != nil { + return x.GenerateTime + } + return nil +} + +func (x *CryptoKeyVersion) GetDestroyTime() *timestamppb.Timestamp { + if x != nil { + return x.DestroyTime + } + return nil +} + +func (x *CryptoKeyVersion) GetDestroyEventTime() *timestamppb.Timestamp { + if x != nil { + return x.DestroyEventTime + } + return nil +} + +func (x *CryptoKeyVersion) GetImportJob() string { + if x != nil { + return x.ImportJob + } + return "" +} + +func (x *CryptoKeyVersion) GetImportTime() *timestamppb.Timestamp { + if x != nil { + return x.ImportTime + } + return nil +} + +func (x *CryptoKeyVersion) GetImportFailureReason() string { + if x != nil { + return x.ImportFailureReason + } + return "" +} + +func (x *CryptoKeyVersion) GetGenerationFailureReason() string { + if x != nil { + return x.GenerationFailureReason + } + return "" +} + +func (x *CryptoKeyVersion) GetExternalDestructionFailureReason() string { + if x != nil { + return x.ExternalDestructionFailureReason + } + return "" +} + +func (x *CryptoKeyVersion) GetExternalProtectionLevelOptions() *ExternalProtectionLevelOptions { + if x != nil { + return x.ExternalProtectionLevelOptions + } + return nil +} + +func (x *CryptoKeyVersion) GetReimportEligible() bool { + if x != nil { + return x.ReimportEligible + } + return false +} + +// The public keys for a given +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via +// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type PublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The public key, encoded in PEM format. For more information, see the + // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for + // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] + // (https://tools.ietf.org/html/rfc7468#section-13). + Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"` + // The + // [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // associated with this key. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Integrity verification field. 
A CRC32C checksum of the returned + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. An integrity check of + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] can be performed by + // computing the CRC32C checksum of + // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] and comparing your + // results to this field. Discard the response in case of non-matching + // checksum values, and perform a limited number of retries. A persistent + // mismatch may indicate an issue in your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + // + // NOTE: This field is in Beta. + PemCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=pem_crc32c,json=pemCrc32c,proto3" json:"pem_crc32c,omitempty"` + // The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key. + // Provided here for verification. + // + // NOTE: This field is in Beta. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key. + ProtectionLevel ProtectionLevel `protobuf:"varint,5,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. +func (*PublicKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{5} +} + +func (x *PublicKey) GetPem() string { + if x != nil { + return x.Pem + } + return "" +} + +func (x *PublicKey) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *PublicKey) GetPemCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PemCrc32C + } + return nil +} + +func (x *PublicKey) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PublicKey) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// An [ImportJob][google.cloud.kms.v1.ImportJob] can be used to create +// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and +// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] using pre-existing +// key material, generated outside of Cloud KMS. +// +// When an [ImportJob][google.cloud.kms.v1.ImportJob] is created, Cloud KMS will +// generate a "wrapping key", which is a public/private key pair. 
You use the +// wrapping key to encrypt (also known as wrap) the pre-existing key material to +// protect it during the import process. The nature of the wrapping key depends +// on the choice of +// [import_method][google.cloud.kms.v1.ImportJob.import_method]. When the +// wrapping key generation is complete, the +// [state][google.cloud.kms.v1.ImportJob.state] will be set to +// [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE] and the +// [public_key][google.cloud.kms.v1.ImportJob.public_key] can be fetched. The +// fetched public key can then be used to wrap your pre-existing key material. +// +// Once the key material is wrapped, it can be imported into a new +// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in an existing +// [CryptoKey][google.cloud.kms.v1.CryptoKey] by calling +// [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +// Multiple [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] can be +// imported with a single [ImportJob][google.cloud.kms.v1.ImportJob]. Cloud KMS +// uses the private key portion of the wrapping key to unwrap the key material. +// Only Cloud KMS has access to the private key. +// +// An [ImportJob][google.cloud.kms.v1.ImportJob] expires 3 days after it is +// created. Once expired, Cloud KMS will no longer be able to import or unwrap +// any key material that was wrapped with the +// [ImportJob][google.cloud.kms.v1.ImportJob]'s public key. +// +// For more information, see +// [Importing a key](https://cloud.google.com/kms/docs/importing-a-key). +type ImportJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. The resource name for this + // [ImportJob][google.cloud.kms.v1.ImportJob] in the format + // `projects/*/locations/*/keyRings/*/importJobs/*`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. Immutable. The wrapping method to be used for incoming key + // material. + ImportMethod ImportJob_ImportMethod `protobuf:"varint,2,opt,name=import_method,json=importMethod,proto3,enum=google.cloud.kms.v1.ImportJob_ImportMethod" json:"import_method,omitempty"` + // Required. Immutable. The protection level of the + // [ImportJob][google.cloud.kms.v1.ImportJob]. This must match the + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // of the [version_template][google.cloud.kms.v1.CryptoKey.version_template] + // on the [CryptoKey][google.cloud.kms.v1.CryptoKey] you attempt to import + // into. + ProtectionLevel ProtectionLevel `protobuf:"varint,9,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Output only. The time at which this + // [ImportJob][google.cloud.kms.v1.ImportJob] was created. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The time this [ImportJob][google.cloud.kms.v1.ImportJob]'s key + // material was generated. + GenerateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=generate_time,json=generateTime,proto3" json:"generate_time,omitempty"` + // Output only. The time at which this + // [ImportJob][google.cloud.kms.v1.ImportJob] is scheduled for expiration and + // can no longer be used to import key material. 
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + // Output only. The time this [ImportJob][google.cloud.kms.v1.ImportJob] + // expired. Only present if [state][google.cloud.kms.v1.ImportJob.state] is + // [EXPIRED][google.cloud.kms.v1.ImportJob.ImportJobState.EXPIRED]. + ExpireEventTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expire_event_time,json=expireEventTime,proto3" json:"expire_event_time,omitempty"` + // Output only. The current state of the + // [ImportJob][google.cloud.kms.v1.ImportJob], indicating if it can be used. + State ImportJob_ImportJobState `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.kms.v1.ImportJob_ImportJobState" json:"state,omitempty"` + // Output only. The public key with which to wrap key material prior to + // import. Only returned if [state][google.cloud.kms.v1.ImportJob.state] is + // [ACTIVE][google.cloud.kms.v1.ImportJob.ImportJobState.ACTIVE]. + PublicKey *ImportJob_WrappingPublicKey `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // Output only. Statement that was generated and signed by the key creator + // (for example, an HSM) at key creation time. Use this statement to verify + // attributes of the key as stored on the HSM, independently of Google. + // Only present if the chosen + // [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] is one with a + // protection level of [HSM][google.cloud.kms.v1.ProtectionLevel.HSM]. + Attestation *KeyOperationAttestation `protobuf:"bytes,8,opt,name=attestation,proto3" json:"attestation,omitempty"` +} + +func (x *ImportJob) Reset() { + *x = ImportJob{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportJob) ProtoMessage() {} + +func (x *ImportJob) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportJob.ProtoReflect.Descriptor instead. 
+func (*ImportJob) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6} +} + +func (x *ImportJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImportJob) GetImportMethod() ImportJob_ImportMethod { + if x != nil { + return x.ImportMethod + } + return ImportJob_IMPORT_METHOD_UNSPECIFIED +} + +func (x *ImportJob) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *ImportJob) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *ImportJob) GetGenerateTime() *timestamppb.Timestamp { + if x != nil { + return x.GenerateTime + } + return nil +} + +func (x *ImportJob) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +func (x *ImportJob) GetExpireEventTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireEventTime + } + return nil +} + +func (x *ImportJob) GetState() ImportJob_ImportJobState { + if x != nil { + return x.State + } + return ImportJob_IMPORT_JOB_STATE_UNSPECIFIED +} + +func (x *ImportJob) GetPublicKey() *ImportJob_WrappingPublicKey { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *ImportJob) GetAttestation() *KeyOperationAttestation { + if x != nil { + return x.Attestation + } + return nil +} + +// ExternalProtectionLevelOptions stores a group of additional fields for +// configuring a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] that +// are specific to the [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] +// protection level and +// [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] protection +// levels. +type ExternalProtectionLevelOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The URI for an external resource that this + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] represents. + ExternalKeyUri string `protobuf:"bytes,1,opt,name=external_key_uri,json=externalKeyUri,proto3" json:"external_key_uri,omitempty"` + // The path to the external key material on the EKM when using + // [EkmConnection][google.cloud.kms.v1.EkmConnection] e.g., "v0/my/key". Set + // this field instead of external_key_uri when using an + // [EkmConnection][google.cloud.kms.v1.EkmConnection]. + EkmConnectionKeyPath string `protobuf:"bytes,2,opt,name=ekm_connection_key_path,json=ekmConnectionKeyPath,proto3" json:"ekm_connection_key_path,omitempty"` +} + +func (x *ExternalProtectionLevelOptions) Reset() { + *x = ExternalProtectionLevelOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalProtectionLevelOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalProtectionLevelOptions) ProtoMessage() {} + +func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalProtectionLevelOptions.ProtoReflect.Descriptor instead. 
+func (*ExternalProtectionLevelOptions) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7} +} + +func (x *ExternalProtectionLevelOptions) GetExternalKeyUri() string { + if x != nil { + return x.ExternalKeyUri + } + return "" +} + +func (x *ExternalProtectionLevelOptions) GetEkmConnectionKeyPath() string { + if x != nil { + return x.EkmConnectionKeyPath + } + return "" +} + +// A +// [KeyAccessJustificationsPolicy][google.cloud.kms.v1.KeyAccessJustificationsPolicy] +// specifies zero or more allowed +// [AccessReason][google.cloud.kms.v1.AccessReason] values for encrypt, decrypt, +// and sign operations on a [CryptoKey][google.cloud.kms.v1.CryptoKey]. +type KeyAccessJustificationsPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of allowed reasons for access to a + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. Zero allowed access reasons + // means all encrypt, decrypt, and sign operations for the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with this policy will + // fail. + AllowedAccessReasons []AccessReason `protobuf:"varint,1,rep,packed,name=allowed_access_reasons,json=allowedAccessReasons,proto3,enum=google.cloud.kms.v1.AccessReason" json:"allowed_access_reasons,omitempty"` +} + +func (x *KeyAccessJustificationsPolicy) Reset() { + *x = KeyAccessJustificationsPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyAccessJustificationsPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyAccessJustificationsPolicy) ProtoMessage() {} + +func (x *KeyAccessJustificationsPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyAccessJustificationsPolicy.ProtoReflect.Descriptor instead. +func (*KeyAccessJustificationsPolicy) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{8} +} + +func (x *KeyAccessJustificationsPolicy) GetAllowedAccessReasons() []AccessReason { + if x != nil { + return x.AllowedAccessReasons + } + return nil +} + +// Certificate chains needed to verify the attestation. +// Certificates in chains are PEM-encoded and are ordered based on +// https://tools.ietf.org/html/rfc5246#section-7.4.2. +type KeyOperationAttestation_CertificateChains struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Cavium certificate chain corresponding to the attestation. + CaviumCerts []string `protobuf:"bytes,1,rep,name=cavium_certs,json=caviumCerts,proto3" json:"cavium_certs,omitempty"` + // Google card certificate chain corresponding to the attestation. + GoogleCardCerts []string `protobuf:"bytes,2,rep,name=google_card_certs,json=googleCardCerts,proto3" json:"google_card_certs,omitempty"` + // Google partition certificate chain corresponding to the attestation. 
+ GooglePartitionCerts []string `protobuf:"bytes,3,rep,name=google_partition_certs,json=googlePartitionCerts,proto3" json:"google_partition_certs,omitempty"` +} + +func (x *KeyOperationAttestation_CertificateChains) Reset() { + *x = KeyOperationAttestation_CertificateChains{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyOperationAttestation_CertificateChains) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyOperationAttestation_CertificateChains) ProtoMessage() {} + +func (x *KeyOperationAttestation_CertificateChains) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyOperationAttestation_CertificateChains.ProtoReflect.Descriptor instead. +func (*KeyOperationAttestation_CertificateChains) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *KeyOperationAttestation_CertificateChains) GetCaviumCerts() []string { + if x != nil { + return x.CaviumCerts + } + return nil +} + +func (x *KeyOperationAttestation_CertificateChains) GetGoogleCardCerts() []string { + if x != nil { + return x.GoogleCardCerts + } + return nil +} + +func (x *KeyOperationAttestation_CertificateChains) GetGooglePartitionCerts() []string { + if x != nil { + return x.GooglePartitionCerts + } + return nil +} + +// The public key component of the wrapping key. For details of the type of +// key this public key corresponds to, see the +// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod]. +type ImportJob_WrappingPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The public key, encoded in PEM format. For more information, see the [RFC + // 7468](https://tools.ietf.org/html/rfc7468) sections for [General + // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and + // [Textual Encoding of Subject Public Key Info] + // (https://tools.ietf.org/html/rfc7468#section-13). + Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"` +} + +func (x *ImportJob_WrappingPublicKey) Reset() { + *x = ImportJob_WrappingPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportJob_WrappingPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportJob_WrappingPublicKey) ProtoMessage() {} + +func (x *ImportJob_WrappingPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportJob_WrappingPublicKey.ProtoReflect.Descriptor instead. 
+func (*ImportJob_WrappingPublicKey) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *ImportJob_WrappingPublicKey) GetPem() string { + if x != nil { + return x.Pem + } + return "" +} + +var File_google_cloud_kms_v1_resources_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x3a, 0x61, + 0xea, 0x41, 0x5e, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x7d, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x03, 0xe0, 
0x41, 0x03, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, + 0x0a, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x07, 0x70, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x48, 0x0a, 0x12, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0f, 0x72, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x0e, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x58, 0x0a, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x24, + 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x5c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 
0x6c, 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x80, 0x01, 0x0a, 0x20, + 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6a, 0x75, 0x73, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x1d, 0x6b, 0x65, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x22, + 0x0a, 0x1e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x55, 0x52, + 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x53, 0x59, 0x4d, 0x4d, + 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, + 0x41, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, + 0x50, 0x54, 0x10, 0x06, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x41, 0x57, 0x5f, 0x45, 0x4e, 0x43, 0x52, + 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x07, 0x12, 0x07, 0x0a, + 0x03, 0x4d, 0x41, 0x43, 0x10, 0x09, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x7d, 0x42, 0x13, 0x0a, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 
0x6f, 0x6e, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x22, 0x83, 0x04, 0x0a, 0x17, 0x4b, + 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x12, 0x64, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x65, + 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x1a, 0x98, 0x01, 0x0a, 0x11, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, + 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x72, 0x64, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x61, 0x72, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x34, 0x0a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 
0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x65, + 0x72, 0x74, 0x73, 0x22, 0x6b, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x1e, 0x41, 0x54, 0x54, 0x45, + 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, + 0x53, 0x53, 0x45, 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, + 0x5f, 0x56, 0x32, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x04, + 0x22, 0x89, 0x15, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x54, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 
0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, + 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x12, + 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, + 0x40, 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x19, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x17, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x23, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x20, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x7e, 0x0a, 0x21, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x1e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x11, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67, + 0x69, 0x62, 0x6c, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x10, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, + 0x65, 0x22, 0xe2, 0x07, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x2c, 0x0a, 0x28, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, + 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, + 0x1b, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, + 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x29, 0x12, + 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x13, + 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x42, 0x43, 0x10, + 0x2a, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43, 0x42, 0x43, + 0x10, 0x2b, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x54, + 0x52, 0x10, 0x2c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43, + 0x54, 0x52, 0x10, 0x2d, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, + 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, + 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, + 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, + 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c, + 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, + 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 0x1e, 0x0a, 0x1a, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, + 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, + 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, + 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 
0x43, 0x53, 0x31, 0x5f, 0x34, + 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, + 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, + 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x33, 0x30, 0x37, 0x32, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, + 0x36, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, + 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, + 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x09, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, + 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, + 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, + 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52, + 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 0x1a, 0x52, + 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52, + 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, + 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45, + 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, + 0x18, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x4b, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x13, 0x0a, 0x0f, 0x45, + 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x28, + 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21, + 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, + 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32, + 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, + 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 
0x65, + 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, + 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, + 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, + 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, + 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, + 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, + 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, + 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, + 0x45, 0x44, 0x10, 0x0a, 0x22, 0x49, 0x0a, 0x14, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, + 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, + 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, + 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 
0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, + 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, + 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, + 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, + 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, + 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, + 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, + 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, + 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, + 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, + 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, 0x11, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 
0x03, 0x70, 0x65, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, + 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, + 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, + 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, + 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, + 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, + 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, + 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, + 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, + 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, 0x01, 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, + 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 
0x01, + 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x74, 0x68, 0x22, 0x78, 0x0a, 0x1d, 0x4b, 0x65, 0x79, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x57, 0x0a, 0x16, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x14, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x73, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, + 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, + 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x2a, 0xab, + 0x03, 0x0a, 0x0c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x55, + 0x50, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x47, 0x4f, 0x4f, 0x47, 0x4c, + 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x49, 0x43, 0x45, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x48, 0x49, 0x52, 0x44, 0x5f, 0x50, + 0x41, 0x52, 0x54, 0x59, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49, 0x4e, + 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x49, + 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x05, 0x12, + 0x25, 0x0a, 0x21, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, + 0x54, 0x45, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x50, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, + 0x26, 0x0a, 0x22, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, + 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, + 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x08, 0x12, 0x2e, 0x0a, 0x2a, 0x4d, 0x4f, 0x44, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, + 0x41, 0x54, 0x45, 
0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x5f, 0x4f, 0x50, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x27, 0x0a, 0x23, 0x47, 0x4f, 0x4f, 0x47, 0x4c, + 0x45, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x52, + 0x4f, 0x44, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x45, 0x52, 0x54, 0x10, 0x0a, + 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x41, 0x55, 0x54, + 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x88, 0x01, 0x0a, + 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, + 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, + 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_resources_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_resources_proto_rawDescData = file_google_cloud_kms_v1_resources_proto_rawDesc +) + +func file_google_cloud_kms_v1_resources_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_resources_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_resources_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_resources_proto_rawDescData) + }) + return file_google_cloud_kms_v1_resources_proto_rawDescData +} + +var file_google_cloud_kms_v1_resources_proto_enumTypes = make([]protoimpl.EnumInfo, 9) +var file_google_cloud_kms_v1_resources_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_google_cloud_kms_v1_resources_proto_goTypes = []any{ + (ProtectionLevel)(0), // 0: google.cloud.kms.v1.ProtectionLevel + (AccessReason)(0), // 1: google.cloud.kms.v1.AccessReason + (CryptoKey_CryptoKeyPurpose)(0), // 2: google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose + (KeyOperationAttestation_AttestationFormat)(0), // 3: google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat + (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 4: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + (CryptoKeyVersion_CryptoKeyVersionState)(0), // 5: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState + (CryptoKeyVersion_CryptoKeyVersionView)(0), // 6: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + (ImportJob_ImportMethod)(0), // 7: google.cloud.kms.v1.ImportJob.ImportMethod + (ImportJob_ImportJobState)(0), // 8: google.cloud.kms.v1.ImportJob.ImportJobState + (*KeyRing)(nil), // 9: google.cloud.kms.v1.KeyRing + (*CryptoKey)(nil), // 10: google.cloud.kms.v1.CryptoKey + (*CryptoKeyVersionTemplate)(nil), // 11: google.cloud.kms.v1.CryptoKeyVersionTemplate + (*KeyOperationAttestation)(nil), // 12: google.cloud.kms.v1.KeyOperationAttestation + (*CryptoKeyVersion)(nil), // 13: google.cloud.kms.v1.CryptoKeyVersion + (*PublicKey)(nil), // 14: 
google.cloud.kms.v1.PublicKey + (*ImportJob)(nil), // 15: google.cloud.kms.v1.ImportJob + (*ExternalProtectionLevelOptions)(nil), // 16: google.cloud.kms.v1.ExternalProtectionLevelOptions + (*KeyAccessJustificationsPolicy)(nil), // 17: google.cloud.kms.v1.KeyAccessJustificationsPolicy + nil, // 18: google.cloud.kms.v1.CryptoKey.LabelsEntry + (*KeyOperationAttestation_CertificateChains)(nil), // 19: google.cloud.kms.v1.KeyOperationAttestation.CertificateChains + (*ImportJob_WrappingPublicKey)(nil), // 20: google.cloud.kms.v1.ImportJob.WrappingPublicKey + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*wrapperspb.Int64Value)(nil), // 23: google.protobuf.Int64Value +} +var file_google_cloud_kms_v1_resources_proto_depIdxs = []int32{ + 21, // 0: google.cloud.kms.v1.KeyRing.create_time:type_name -> google.protobuf.Timestamp + 13, // 1: google.cloud.kms.v1.CryptoKey.primary:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 2, // 2: google.cloud.kms.v1.CryptoKey.purpose:type_name -> google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose + 21, // 3: google.cloud.kms.v1.CryptoKey.create_time:type_name -> google.protobuf.Timestamp + 21, // 4: google.cloud.kms.v1.CryptoKey.next_rotation_time:type_name -> google.protobuf.Timestamp + 22, // 5: google.cloud.kms.v1.CryptoKey.rotation_period:type_name -> google.protobuf.Duration + 11, // 6: google.cloud.kms.v1.CryptoKey.version_template:type_name -> google.cloud.kms.v1.CryptoKeyVersionTemplate + 18, // 7: google.cloud.kms.v1.CryptoKey.labels:type_name -> google.cloud.kms.v1.CryptoKey.LabelsEntry + 22, // 8: google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration:type_name -> google.protobuf.Duration + 17, // 9: google.cloud.kms.v1.CryptoKey.key_access_justifications_policy:type_name -> google.cloud.kms.v1.KeyAccessJustificationsPolicy + 0, // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 4, // 11: google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 3, // 12: google.cloud.kms.v1.KeyOperationAttestation.format:type_name -> google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat + 19, // 13: google.cloud.kms.v1.KeyOperationAttestation.cert_chains:type_name -> google.cloud.kms.v1.KeyOperationAttestation.CertificateChains + 5, // 14: google.cloud.kms.v1.CryptoKeyVersion.state:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState + 0, // 15: google.cloud.kms.v1.CryptoKeyVersion.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 4, // 16: google.cloud.kms.v1.CryptoKeyVersion.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 12, // 17: google.cloud.kms.v1.CryptoKeyVersion.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation + 21, // 18: google.cloud.kms.v1.CryptoKeyVersion.create_time:type_name -> google.protobuf.Timestamp + 21, // 19: google.cloud.kms.v1.CryptoKeyVersion.generate_time:type_name -> google.protobuf.Timestamp + 21, // 20: google.cloud.kms.v1.CryptoKeyVersion.destroy_time:type_name -> google.protobuf.Timestamp + 21, // 21: google.cloud.kms.v1.CryptoKeyVersion.destroy_event_time:type_name -> google.protobuf.Timestamp + 21, // 22: google.cloud.kms.v1.CryptoKeyVersion.import_time:type_name -> google.protobuf.Timestamp + 16, // 23: google.cloud.kms.v1.CryptoKeyVersion.external_protection_level_options:type_name -> 
google.cloud.kms.v1.ExternalProtectionLevelOptions + 4, // 24: google.cloud.kms.v1.PublicKey.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 23, // 25: google.cloud.kms.v1.PublicKey.pem_crc32c:type_name -> google.protobuf.Int64Value + 0, // 26: google.cloud.kms.v1.PublicKey.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 7, // 27: google.cloud.kms.v1.ImportJob.import_method:type_name -> google.cloud.kms.v1.ImportJob.ImportMethod + 0, // 28: google.cloud.kms.v1.ImportJob.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 21, // 29: google.cloud.kms.v1.ImportJob.create_time:type_name -> google.protobuf.Timestamp + 21, // 30: google.cloud.kms.v1.ImportJob.generate_time:type_name -> google.protobuf.Timestamp + 21, // 31: google.cloud.kms.v1.ImportJob.expire_time:type_name -> google.protobuf.Timestamp + 21, // 32: google.cloud.kms.v1.ImportJob.expire_event_time:type_name -> google.protobuf.Timestamp + 8, // 33: google.cloud.kms.v1.ImportJob.state:type_name -> google.cloud.kms.v1.ImportJob.ImportJobState + 20, // 34: google.cloud.kms.v1.ImportJob.public_key:type_name -> google.cloud.kms.v1.ImportJob.WrappingPublicKey + 12, // 35: google.cloud.kms.v1.ImportJob.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation + 1, // 36: google.cloud.kms.v1.KeyAccessJustificationsPolicy.allowed_access_reasons:type_name -> google.cloud.kms.v1.AccessReason + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_resources_proto_init() } +func file_google_cloud_kms_v1_resources_proto_init() { + if File_google_cloud_kms_v1_resources_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_resources_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*KeyRing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKeyVersionTemplate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*KeyOperationAttestation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKeyVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*PublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_cloud_kms_v1_resources_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ImportJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ExternalProtectionLevelOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*KeyAccessJustificationsPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*KeyOperationAttestation_CertificateChains); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*ImportJob_WrappingPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []any{ + (*CryptoKey_RotationPeriod)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_resources_proto_rawDesc, + NumEnums: 9, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_cloud_kms_v1_resources_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_resources_proto_depIdxs, + EnumInfos: file_google_cloud_kms_v1_resources_proto_enumTypes, + MessageInfos: file_google_cloud_kms_v1_resources_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_resources_proto = out.File + file_google_cloud_kms_v1_resources_proto_rawDesc = nil + file_google_cloud_kms_v1_resources_proto_goTypes = nil + file_google_cloud_kms_v1_resources_proto_depIdxs = nil +} diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go new file mode 100644 index 000000000..d629ee2d2 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go @@ -0,0 +1,7283 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/cloud/kms/v1/service.proto + +package kmspb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [KeyRings][google.cloud.kms.v1.KeyRing] to include in the response. Further + // [KeyRings][google.cloud.kms.v1.KeyRing] can subsequently be obtained by + // including the + // [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListKeyRingsResponse.next_page_token][google.cloud.kms.v1.ListKeyRingsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). 
+ OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListKeyRingsRequest) Reset() { + *x = ListKeyRingsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyRingsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyRingsRequest) ProtoMessage() {} + +func (x *ListKeyRingsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyRingsRequest.ProtoReflect.Descriptor instead. +func (*ListKeyRingsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListKeyRingsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListKeyRingsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListKeyRingsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListKeyRingsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListKeyRingsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the [KeyRing][google.cloud.kms.v1.KeyRing] + // to list, in the format `projects/*/locations/*/keyRings/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [CryptoKeys][google.cloud.kms.v1.CryptoKey] to include in the response. + // Further [CryptoKeys][google.cloud.kms.v1.CryptoKey] can subsequently be + // obtained by including the + // [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListCryptoKeysResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeysResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields of the primary version to include in the response. + VersionView CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=version_view,json=versionView,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"version_view,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. 
If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListCryptoKeysRequest) Reset() { + *x = ListCryptoKeysRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeysRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeysRequest) ProtoMessage() {} + +func (x *ListCryptoKeysRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeysRequest.ProtoReflect.Descriptor instead. +func (*ListCryptoKeysRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListCryptoKeysRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListCryptoKeysRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListCryptoKeysRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListCryptoKeysRequest) GetVersionView() CryptoKeyVersion_CryptoKeyVersionView { + if x != nil { + return x.VersionView + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +func (x *ListCryptoKeysRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListCryptoKeysRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to list, in the format + // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] to include in the + // response. Further [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] + // can subsequently be obtained by including the + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token] + // in a subsequent request. If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListCryptoKeyVersionsResponse.next_page_token][google.cloud.kms.v1.ListCryptoKeyVersionsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // The fields to include in the response. 
+ View CryptoKeyVersion_CryptoKeyVersionView `protobuf:"varint,4,opt,name=view,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionView" json:"view,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListCryptoKeyVersionsRequest) Reset() { + *x = ListCryptoKeyVersionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeyVersionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeyVersionsRequest) ProtoMessage() {} + +func (x *ListCryptoKeyVersionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeyVersionsRequest.ProtoReflect.Descriptor instead. +func (*ListCryptoKeyVersionsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListCryptoKeyVersionsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListCryptoKeyVersionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetView() CryptoKeyVersion_CryptoKeyVersionView { + if x != nil { + return x.View + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED +} + +func (x *ListCryptoKeyVersionsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListCryptoKeyVersionsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Request message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +type ListImportJobsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the [KeyRing][google.cloud.kms.v1.KeyRing] + // to list, in the format `projects/*/locations/*/keyRings/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. Optional limit on the number of + // [ImportJobs][google.cloud.kms.v1.ImportJob] to include in the response. + // Further [ImportJobs][google.cloud.kms.v1.ImportJob] can subsequently be + // obtained by including the + // [ListImportJobsResponse.next_page_token][google.cloud.kms.v1.ListImportJobsResponse.next_page_token] + // in a subsequent request. 
If unspecified, the server will pick an + // appropriate default. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Optional. Optional pagination token, returned earlier via + // [ListImportJobsResponse.next_page_token][google.cloud.kms.v1.ListImportJobsResponse.next_page_token]. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Optional. Only include resources that match the filter in the response. For + // more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` + // Optional. Specify how the results should be sorted. If not specified, the + // results will be sorted in the default order. For more information, see + // [Sorting and filtering list + // results](https://cloud.google.com/kms/docs/sorting-and-filtering). + OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` +} + +func (x *ListImportJobsRequest) Reset() { + *x = ListImportJobsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListImportJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListImportJobsRequest) ProtoMessage() {} + +func (x *ListImportJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImportJobsRequest.ProtoReflect.Descriptor instead. +func (*ListImportJobsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListImportJobsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListImportJobsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListImportJobsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListImportJobsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListImportJobsRequest) GetOrderBy() string { + if x != nil { + return x.OrderBy + } + return "" +} + +// Response message for +// [KeyManagementService.ListKeyRings][google.cloud.kms.v1.KeyManagementService.ListKeyRings]. +type ListKeyRingsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [KeyRings][google.cloud.kms.v1.KeyRing]. + KeyRings []*KeyRing `protobuf:"bytes,1,rep,name=key_rings,json=keyRings,proto3" json:"key_rings,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListKeyRingsRequest.page_token][google.cloud.kms.v1.ListKeyRingsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [KeyRings][google.cloud.kms.v1.KeyRing] that matched + // the query. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListKeyRingsResponse) Reset() { + *x = ListKeyRingsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListKeyRingsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListKeyRingsResponse) ProtoMessage() {} + +func (x *ListKeyRingsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListKeyRingsResponse.ProtoReflect.Descriptor instead. +func (*ListKeyRingsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{4} +} + +func (x *ListKeyRingsResponse) GetKeyRings() []*KeyRing { + if x != nil { + return x.KeyRings + } + return nil +} + +func (x *ListKeyRingsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListKeyRingsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListCryptoKeys][google.cloud.kms.v1.KeyManagementService.ListCryptoKeys]. +type ListCryptoKeysResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + CryptoKeys []*CryptoKey `protobuf:"bytes,1,rep,name=crypto_keys,json=cryptoKeys,proto3" json:"crypto_keys,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeysRequest.page_token][google.cloud.kms.v1.ListCryptoKeysRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [CryptoKeys][google.cloud.kms.v1.CryptoKey] that + // matched the query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListCryptoKeysResponse) Reset() { + *x = ListCryptoKeysResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeysResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeysResponse) ProtoMessage() {} + +func (x *ListCryptoKeysResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeysResponse.ProtoReflect.Descriptor instead. 
+func (*ListCryptoKeysResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{5} +} + +func (x *ListCryptoKeysResponse) GetCryptoKeys() []*CryptoKey { + if x != nil { + return x.CryptoKeys + } + return nil +} + +func (x *ListCryptoKeysResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListCryptoKeysResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListCryptoKeyVersions][google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions]. +type ListCryptoKeyVersionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + CryptoKeyVersions []*CryptoKeyVersion `protobuf:"bytes,1,rep,name=crypto_key_versions,json=cryptoKeyVersions,proto3" json:"crypto_key_versions,omitempty"` + // A token to retrieve next page of results. Pass this value in + // [ListCryptoKeyVersionsRequest.page_token][google.cloud.kms.v1.ListCryptoKeyVersionsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] that matched the + // query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListCryptoKeyVersionsResponse) Reset() { + *x = ListCryptoKeyVersionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListCryptoKeyVersionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListCryptoKeyVersionsResponse) ProtoMessage() {} + +func (x *ListCryptoKeyVersionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListCryptoKeyVersionsResponse.ProtoReflect.Descriptor instead. +func (*ListCryptoKeyVersionsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListCryptoKeyVersionsResponse) GetCryptoKeyVersions() []*CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersions + } + return nil +} + +func (x *ListCryptoKeyVersionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListCryptoKeyVersionsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Response message for +// [KeyManagementService.ListImportJobs][google.cloud.kms.v1.KeyManagementService.ListImportJobs]. +type ListImportJobsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of [ImportJobs][google.cloud.kms.v1.ImportJob]. + ImportJobs []*ImportJob `protobuf:"bytes,1,rep,name=import_jobs,json=importJobs,proto3" json:"import_jobs,omitempty"` + // A token to retrieve next page of results. 
Pass this value in + // [ListImportJobsRequest.page_token][google.cloud.kms.v1.ListImportJobsRequest.page_token] + // to retrieve the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of [ImportJobs][google.cloud.kms.v1.ImportJob] that + // matched the query. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` +} + +func (x *ListImportJobsResponse) Reset() { + *x = ListImportJobsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListImportJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListImportJobsResponse) ProtoMessage() {} + +func (x *ListImportJobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListImportJobsResponse.ProtoReflect.Descriptor instead. +func (*ListImportJobsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListImportJobsResponse) GetImportJobs() []*ImportJob { + if x != nil { + return x.ImportJobs + } + return nil +} + +func (x *ListImportJobsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +func (x *ListImportJobsResponse) GetTotalSize() int32 { + if x != nil { + return x.TotalSize + } + return 0 +} + +// Request message for +// [KeyManagementService.GetKeyRing][google.cloud.kms.v1.KeyManagementService.GetKeyRing]. +type GetKeyRingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the + // [KeyRing][google.cloud.kms.v1.KeyRing] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetKeyRingRequest) Reset() { + *x = GetKeyRingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetKeyRingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyRingRequest) ProtoMessage() {} + +func (x *GetKeyRingRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetKeyRingRequest.ProtoReflect.Descriptor instead. +func (*GetKeyRingRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{8} +} + +func (x *GetKeyRingRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetCryptoKey][google.cloud.kms.v1.KeyManagementService.GetCryptoKey]. 
+type GetCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetCryptoKeyRequest) Reset() { + *x = GetCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCryptoKeyRequest) ProtoMessage() {} + +func (x *GetCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*GetCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{9} +} + +func (x *GetCryptoKeyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion]. +type GetCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetCryptoKeyVersionRequest) Reset() { + *x = GetCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *GetCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*GetCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{10} +} + +func (x *GetCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. +type GetPublicKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key to get. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetPublicKeyRequest) Reset() { + *x = GetPublicKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPublicKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPublicKeyRequest) ProtoMessage() {} + +func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPublicKeyRequest.ProtoReflect.Descriptor instead. +func (*GetPublicKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetPublicKeyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.GetImportJob][google.cloud.kms.v1.KeyManagementService.GetImportJob]. +type GetImportJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.ImportJob.name] of the + // [ImportJob][google.cloud.kms.v1.ImportJob] to get. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetImportJobRequest) Reset() { + *x = GetImportJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetImportJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetImportJobRequest) ProtoMessage() {} + +func (x *GetImportJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetImportJobRequest.ProtoReflect.Descriptor instead. +func (*GetImportJobRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{12} +} + +func (x *GetImportJobRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.CreateKeyRing][google.cloud.kms.v1.KeyManagementService.CreateKeyRing]. +type CreateKeyRingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the location associated with the + // [KeyRings][google.cloud.kms.v1.KeyRing], in the format + // `projects/*/locations/*`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a location and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + KeyRingId string `protobuf:"bytes,2,opt,name=key_ring_id,json=keyRingId,proto3" json:"key_ring_id,omitempty"` + // Required. A [KeyRing][google.cloud.kms.v1.KeyRing] with initial field + // values. 
+ KeyRing *KeyRing `protobuf:"bytes,3,opt,name=key_ring,json=keyRing,proto3" json:"key_ring,omitempty"` +} + +func (x *CreateKeyRingRequest) Reset() { + *x = CreateKeyRingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateKeyRingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateKeyRingRequest) ProtoMessage() {} + +func (x *CreateKeyRingRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateKeyRingRequest.ProtoReflect.Descriptor instead. +func (*CreateKeyRingRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateKeyRingRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateKeyRingRequest) GetKeyRingId() string { + if x != nil { + return x.KeyRingId + } + return "" +} + +func (x *CreateKeyRingRequest) GetKeyRing() *KeyRing { + if x != nil { + return x.KeyRing + } + return nil +} + +// Request message for +// [KeyManagementService.CreateCryptoKey][google.cloud.kms.v1.KeyManagementService.CreateCryptoKey]. +type CreateCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the KeyRing + // associated with the [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a KeyRing and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + CryptoKeyId string `protobuf:"bytes,2,opt,name=crypto_key_id,json=cryptoKeyId,proto3" json:"crypto_key_id,omitempty"` + // Required. A [CryptoKey][google.cloud.kms.v1.CryptoKey] with initial field + // values. + CryptoKey *CryptoKey `protobuf:"bytes,3,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + // If set to true, the request will create a + // [CryptoKey][google.cloud.kms.v1.CryptoKey] without any + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. You must + // manually call + // [CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion] + // or + // [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion] + // before you can use this [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
+ SkipInitialVersionCreation bool `protobuf:"varint,5,opt,name=skip_initial_version_creation,json=skipInitialVersionCreation,proto3" json:"skip_initial_version_creation,omitempty"` +} + +func (x *CreateCryptoKeyRequest) Reset() { + *x = CreateCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCryptoKeyRequest) ProtoMessage() {} + +func (x *CreateCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*CreateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateCryptoKeyRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateCryptoKeyRequest) GetCryptoKeyId() string { + if x != nil { + return x.CryptoKeyId + } + return "" +} + +func (x *CreateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if x != nil { + return x.CryptoKey + } + return nil +} + +func (x *CreateCryptoKeyRequest) GetSkipInitialVersionCreation() bool { + if x != nil { + return x.SkipInitialVersionCreation + } + return false +} + +// Request message for +// [KeyManagementService.CreateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion]. +type CreateCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with the + // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. A [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // initial field values. + CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,2,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` +} + +func (x *CreateCryptoKeyVersionRequest) Reset() { + *x = CreateCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *CreateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. 
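(Editorial aside, not part of the vendored file: as the `skip_initial_version_creation` comment above notes, a key created with that flag has no usable version until CreateCryptoKeyVersion or ImportCryptoKeyVersion is called. A minimal sketch of the two request messages involved; the parent and IDs are hypothetical:)

```
package main

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	// Hypothetical key ring; IDs must match [a-zA-Z0-9_-]{1,63}.
	parent := "projects/my-project/locations/global/keyRings/my-ring"

	createKey := &kmspb.CreateCryptoKeyRequest{
		Parent:      parent,
		CryptoKeyId: "my-key",
		CryptoKey: &kmspb.CryptoKey{
			Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT,
		},
		// No CryptoKeyVersion is created; one must be created or imported later.
		SkipInitialVersionCreation: true,
	}

	// Follow-up request that creates the first usable version.
	createVersion := &kmspb.CreateCryptoKeyVersionRequest{
		Parent:           parent + "/cryptoKeys/my-key",
		CryptoKeyVersion: &kmspb.CryptoKeyVersion{},
	}

	fmt.Println(createKey.GetCryptoKeyId(), createVersion.GetParent())
}
```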
+func (*CreateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateCryptoKeyVersionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersion + } + return nil +} + +// Request message for +// [KeyManagementService.ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion]. +type ImportCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.CryptoKey.name] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to be imported into. + // + // The create permission is only required on this key when creating a new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The optional [name][google.cloud.kms.v1.CryptoKeyVersion.name] of + // an existing [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to + // target for an import operation. If this field is not present, a new + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] containing the + // supplied key material is created. + // + // If this field is present, the supplied key material is imported into + // the existing [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. To + // import into an existing + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] must be a child of + // [ImportCryptoKeyVersionRequest.parent][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.parent], + // have been previously created via [ImportCryptoKeyVersion][], and be in + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED] + // or + // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED] + // state. The key material and algorithm must match the previous + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] exactly if the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] has ever contained + // key material. + CryptoKeyVersion string `protobuf:"bytes,6,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + // Required. The + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm] + // of the key being imported. This does not need to match the + // [version_template][google.cloud.kms.v1.CryptoKey.version_template] of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] this version imports into. + Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"` + // Required. The [name][google.cloud.kms.v1.ImportJob.name] of the + // [ImportJob][google.cloud.kms.v1.ImportJob] that was used to wrap this key + // material. + ImportJob string `protobuf:"bytes,4,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` + // Optional. The wrapped key material to import. + // + // Before wrapping, key material must be formatted. If importing symmetric key + // material, the expected key material format is plain bytes. 
If importing + // asymmetric key material, the expected key material format is PKCS#8-encoded + // DER (the PrivateKeyInfo structure from RFC 5208). + // + // When wrapping with import methods + // ([RSA_OAEP_3072_SHA1_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA1_AES_256] + // or + // [RSA_OAEP_4096_SHA1_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA1_AES_256] + // or + // [RSA_OAEP_3072_SHA256_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA256_AES_256] + // or + // [RSA_OAEP_4096_SHA256_AES_256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA256_AES_256]), + // + // this field must contain the concatenation of: + //
+ //  1. An ephemeral AES-256 wrapping key wrapped with the
+ //     [public_key][google.cloud.kms.v1.ImportJob.public_key] using
+ //     RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
+ //     label.
+ //  2. The formatted key to be imported, wrapped with the ephemeral AES-256
+ //     key using AES-KWP (RFC 5649).
+ // + // This format is the same as the format produced by PKCS#11 mechanism + // CKM_RSA_AES_KEY_WRAP. + // + // When wrapping with import methods + // ([RSA_OAEP_3072_SHA256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_3072_SHA256] + // or + // [RSA_OAEP_4096_SHA256][google.cloud.kms.v1.ImportJob.ImportMethod.RSA_OAEP_4096_SHA256]), + // + // this field must contain the formatted key to be imported, wrapped with the + // [public_key][google.cloud.kms.v1.ImportJob.public_key] using RSAES-OAEP + // with SHA-256, MGF1 with SHA-256, and an empty label. + WrappedKey []byte `protobuf:"bytes,8,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + // This field is legacy. Use the field + // [wrapped_key][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.wrapped_key] + // instead. + // + // Types that are assignable to WrappedKeyMaterial: + // + // *ImportCryptoKeyVersionRequest_RsaAesWrappedKey + WrappedKeyMaterial isImportCryptoKeyVersionRequest_WrappedKeyMaterial `protobuf_oneof:"wrapped_key_material"` +} + +func (x *ImportCryptoKeyVersionRequest) Reset() { + *x = ImportCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImportCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImportCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *ImportCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImportCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*ImportCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{16} +} + +func (x *ImportCryptoKeyVersionRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetCryptoKeyVersion() string { + if x != nil { + return x.CryptoKeyVersion + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetAlgorithm() CryptoKeyVersion_CryptoKeyVersionAlgorithm { + if x != nil { + return x.Algorithm + } + return CryptoKeyVersion_CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED +} + +func (x *ImportCryptoKeyVersionRequest) GetImportJob() string { + if x != nil { + return x.ImportJob + } + return "" +} + +func (x *ImportCryptoKeyVersionRequest) GetWrappedKey() []byte { + if x != nil { + return x.WrappedKey + } + return nil +} + +func (m *ImportCryptoKeyVersionRequest) GetWrappedKeyMaterial() isImportCryptoKeyVersionRequest_WrappedKeyMaterial { + if m != nil { + return m.WrappedKeyMaterial + } + return nil +} + +func (x *ImportCryptoKeyVersionRequest) GetRsaAesWrappedKey() []byte { + if x, ok := x.GetWrappedKeyMaterial().(*ImportCryptoKeyVersionRequest_RsaAesWrappedKey); ok { + return x.RsaAesWrappedKey + } + return nil +} + +type isImportCryptoKeyVersionRequest_WrappedKeyMaterial interface { + isImportCryptoKeyVersionRequest_WrappedKeyMaterial() +} + +type ImportCryptoKeyVersionRequest_RsaAesWrappedKey struct { + // Optional. This field has the same meaning as + // [wrapped_key][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.wrapped_key]. 
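(Editorial aside, not part of the vendored file: a minimal sketch of how the import request above might be populated once key material has been wrapped out of band. The resource names are hypothetical, and `wrapped` stands in for the RSA-OAEP + AES-KWP blob described in the `wrapped_key` comment:)

```
package main

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	// Placeholder for key material already wrapped per the wrapped_key comment
	// (ephemeral AES-256 key wrapped with RSAES-OAEP, then the key wrapped with AES-KWP).
	wrapped := []byte("...wrapped key material...")

	req := &kmspb.ImportCryptoKeyVersionRequest{
		// Hypothetical CryptoKey to import into.
		Parent:    "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
		Algorithm: kmspb.CryptoKeyVersion_GOOGLE_SYMMETRIC_ENCRYPTION,
		// Hypothetical ImportJob that was used to wrap the key material.
		ImportJob:  "projects/my-project/locations/global/keyRings/my-ring/importJobs/my-import-job",
		WrappedKey: wrapped,
	}
	fmt.Println(req.GetImportJob(), len(req.GetWrappedKey()))
}
```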
+ // Prefer to use that field in new work. Either that field or this field + // (but not both) must be specified. + RsaAesWrappedKey []byte `protobuf:"bytes,5,opt,name=rsa_aes_wrapped_key,json=rsaAesWrappedKey,proto3,oneof"` +} + +func (*ImportCryptoKeyVersionRequest_RsaAesWrappedKey) isImportCryptoKeyVersionRequest_WrappedKeyMaterial() { +} + +// Request message for +// [KeyManagementService.CreateImportJob][google.cloud.kms.v1.KeyManagementService.CreateImportJob]. +type CreateImportJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The [name][google.cloud.kms.v1.KeyRing.name] of the + // [KeyRing][google.cloud.kms.v1.KeyRing] associated with the + // [ImportJobs][google.cloud.kms.v1.ImportJob]. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. It must be unique within a KeyRing and match the regular + // expression `[a-zA-Z0-9_-]{1,63}` + ImportJobId string `protobuf:"bytes,2,opt,name=import_job_id,json=importJobId,proto3" json:"import_job_id,omitempty"` + // Required. An [ImportJob][google.cloud.kms.v1.ImportJob] with initial field + // values. + ImportJob *ImportJob `protobuf:"bytes,3,opt,name=import_job,json=importJob,proto3" json:"import_job,omitempty"` +} + +func (x *CreateImportJobRequest) Reset() { + *x = CreateImportJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateImportJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateImportJobRequest) ProtoMessage() {} + +func (x *CreateImportJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateImportJobRequest.ProtoReflect.Descriptor instead. +func (*CreateImportJobRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{17} +} + +func (x *CreateImportJobRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateImportJobRequest) GetImportJobId() string { + if x != nil { + return x.ImportJobId + } + return "" +} + +func (x *CreateImportJobRequest) GetImportJob() *ImportJob { + if x != nil { + return x.ImportJob + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKey][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey]. +type UpdateCryptoKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [CryptoKey][google.cloud.kms.v1.CryptoKey] with updated values. + CryptoKey *CryptoKey `protobuf:"bytes,1,opt,name=crypto_key,json=cryptoKey,proto3" json:"crypto_key,omitempty"` + // Required. List of fields to be updated in this request. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateCryptoKeyRequest) Reset() { + *x = UpdateCryptoKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateCryptoKeyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{18} +} + +func (x *UpdateCryptoKeyRequest) GetCryptoKey() *CryptoKey { + if x != nil { + return x.CryptoKey + } + return nil +} + +func (x *UpdateCryptoKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion]. +type UpdateCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // updated values. + CryptoKeyVersion *CryptoKeyVersion `protobuf:"bytes,1,opt,name=crypto_key_version,json=cryptoKeyVersion,proto3" json:"crypto_key_version,omitempty"` + // Required. List of fields to be updated in this request. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateCryptoKeyVersionRequest) Reset() { + *x = UpdateCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. 
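(Editorial aside, not part of the vendored file: the update requests above pair the resource with a `google.protobuf.FieldMask` naming the fields to change; the server only touches the masked paths. A minimal sketch with a hypothetical key whose labels are being replaced:)

```
package main

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	req := &kmspb.UpdateCryptoKeyRequest{
		CryptoKey: &kmspb.CryptoKey{
			// Hypothetical key; Name identifies the resource to update.
			Name:   "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
			Labels: map[string]string{"env": "dev"},
		},
		// Only the paths listed in the mask are updated; other fields are ignored.
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
	}
	fmt.Println(req.GetUpdateMask().GetPaths())
}
```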
+func (*UpdateCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{19} +} + +func (x *UpdateCryptoKeyVersionRequest) GetCryptoKeyVersion() *CryptoKeyVersion { + if x != nil { + return x.CryptoKeyVersion + } + return nil +} + +func (x *UpdateCryptoKeyVersionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for +// [KeyManagementService.UpdateCryptoKeyPrimaryVersion][google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion]. +type UpdateCryptoKeyPrimaryVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to update. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The id of the child + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use as primary. + CryptoKeyVersionId string `protobuf:"bytes,2,opt,name=crypto_key_version_id,json=cryptoKeyVersionId,proto3" json:"crypto_key_version_id,omitempty"` +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) Reset() { + *x = UpdateCryptoKeyPrimaryVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCryptoKeyPrimaryVersionRequest.ProtoReflect.Descriptor instead. +func (*UpdateCryptoKeyPrimaryVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{20} +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCryptoKeyPrimaryVersionRequest) GetCryptoKeyVersionId() string { + if x != nil { + return x.CryptoKeyVersionId + } + return "" +} + +// Request message for +// [KeyManagementService.DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. +type DestroyCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to destroy. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DestroyCryptoKeyVersionRequest) Reset() { + *x = DestroyCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DestroyCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *DestroyCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DestroyCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*DestroyCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{21} +} + +func (x *DestroyCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]. +type RestoreCryptoKeyVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to restore. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *RestoreCryptoKeyVersionRequest) Reset() { + *x = RestoreCryptoKeyVersionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreCryptoKeyVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {} + +func (x *RestoreCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreCryptoKeyVersionRequest.ProtoReflect.Descriptor instead. +func (*RestoreCryptoKeyVersionRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{22} +} + +func (x *RestoreCryptoKeyVersionRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. +type EncryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] or + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // encryption. + // + // If a [CryptoKey][google.cloud.kms.v1.CryptoKey] is specified, the server + // will use its [primary version][google.cloud.kms.v1.CryptoKey.primary]. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to encrypt. 
Must be no larger than 64KiB. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. + // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE], + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL], and + // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] keys, the + // plaintext must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + Plaintext []byte `protobuf:"bytes,2,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Optional. Optional data that, if specified, must also be provided during + // decryption through + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. + // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE], + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL], and + // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] keys the + // AAD must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]) + // is equal to + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data] + // using this checksum. 
+ // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]) + // is equal to + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,8,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` +} + +func (x *EncryptRequest) Reset() { + *x = EncryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EncryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptRequest) ProtoMessage() {} + +func (x *EncryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncryptRequest.ProtoReflect.Descriptor instead. +func (*EncryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{23} +} + +func (x *EncryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EncryptRequest) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *EncryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *EncryptRequest) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *EncryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +type DecryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKey][google.cloud.kms.v1.CryptoKey] to use for decryption. The + // server will choose the appropriate version. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The encrypted data originally returned in + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext]. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional. 
Optional data that must match the data originally supplied in + // [EncryptRequest.additional_authenticated_data][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([DecryptRequest.ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]) + // is equal to + // [DecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([DecryptRequest.additional_authenticated_data][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data]) + // is equal to + // [DecryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. 
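(Editorial aside, not part of the vendored file: the `*_crc32c` fields in the encrypt/decrypt requests above all follow the same pattern: the client computes a CRC32C (Castagnoli) checksum of the corresponding bytes and sends it as an `Int64Value`. A minimal client-side sketch with a hypothetical key name and plaintext:)

```
package main

import (
	"fmt"
	"hash/crc32"

	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// crc32c computes the Castagnoli CRC32 checksum KMS expects for integrity fields.
func crc32c(data []byte) int64 {
	return int64(crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)))
}

func main() {
	plaintext := []byte("hello, world")

	req := &kmspb.EncryptRequest{
		// Hypothetical CryptoKey; the server uses its primary version.
		Name:            "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key",
		Plaintext:       plaintext,
		PlaintextCrc32C: wrapperspb.Int64(crc32c(plaintext)),
	}
	fmt.Println(req.GetPlaintextCrc32C())
}
```

(The same helper applies to the ciphertext and AAD checksum fields on the decrypt side.)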
+ AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,6,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` +} + +func (x *DecryptRequest) Reset() { + *x = DecryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DecryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DecryptRequest) ProtoMessage() {} + +func (x *DecryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DecryptRequest.ProtoReflect.Descriptor instead. +func (*DecryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{24} +} + +func (x *DecryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DecryptRequest) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *DecryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *DecryptRequest) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +func (x *DecryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.RawEncrypt][google.cloud.kms.v1.KeyManagementService.RawEncrypt]. +type RawEncryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // encryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to encrypt. Must be no larger than 64KiB. + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. + // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE] keys, the + // plaintext must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + Plaintext []byte `protobuf:"bytes,2,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Optional. Optional data that, if specified, must also be provided during + // decryption through + // [RawDecryptRequest.additional_authenticated_data][google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data]. + // + // This field may only be used in conjunction with an + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.algorithm] that accepts + // additional authenticated data (for example, AES-GCM). + // + // The maximum size depends on the key version's + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level]. 
+ // For [SOFTWARE][google.cloud.kms.v1.ProtectionLevel.SOFTWARE] keys, the + // plaintext must be no larger than 64KiB. For + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] keys, the combined length of + // the plaintext and additional_authenticated_data fields must be no larger + // than 8KiB. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawEncryptRequest.plaintext][google.cloud.kms.v1.RawEncryptRequest.plaintext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received plaintext using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that CRC32C(plaintext) is equal + // to plaintext_crc32c, and if so, perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawEncryptRequest.additional_authenticated_data][google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received additional_authenticated_data using + // this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(additional_authenticated_data) is equal to + // additional_authenticated_data_crc32c, and if so, perform + // a limited number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` + // Optional. A customer-supplied initialization vector that will be used for + // encryption. If it is not provided for AES-CBC and AES-CTR, one will be + // generated. It will be returned in + // [RawEncryptResponse.initialization_vector][google.cloud.kms.v1.RawEncryptResponse.initialization_vector]. + InitializationVector []byte `protobuf:"bytes,6,opt,name=initialization_vector,json=initializationVector,proto3" json:"initialization_vector,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawEncryptRequest.initialization_vector][google.cloud.kms.v1.RawEncryptRequest.initialization_vector]. 
+ // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received initialization_vector using this + // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(initialization_vector) is equal to + // initialization_vector_crc32c, and if so, perform + // a limited number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + InitializationVectorCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=initialization_vector_crc32c,json=initializationVectorCrc32c,proto3" json:"initialization_vector_crc32c,omitempty"` +} + +func (x *RawEncryptRequest) Reset() { + *x = RawEncryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawEncryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawEncryptRequest) ProtoMessage() {} + +func (x *RawEncryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawEncryptRequest.ProtoReflect.Descriptor instead. +func (*RawEncryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{25} +} + +func (x *RawEncryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RawEncryptRequest) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *RawEncryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *RawEncryptRequest) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *RawEncryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +func (x *RawEncryptRequest) GetInitializationVector() []byte { + if x != nil { + return x.InitializationVector + } + return nil +} + +func (x *RawEncryptRequest) GetInitializationVectorCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.InitializationVectorCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.RawDecrypt][google.cloud.kms.v1.KeyManagementService.RawDecrypt]. +type RawDecryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // decryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. 
The encrypted data originally returned in + // [RawEncryptResponse.ciphertext][google.cloud.kms.v1.RawEncryptResponse.ciphertext]. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional. Optional data that must match the data originally supplied in + // [RawEncryptRequest.additional_authenticated_data][google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data]. + AdditionalAuthenticatedData []byte `protobuf:"bytes,3,opt,name=additional_authenticated_data,json=additionalAuthenticatedData,proto3" json:"additional_authenticated_data,omitempty"` + // Required. The initialization vector (IV) used during encryption, which must + // match the data originally provided in + // [RawEncryptResponse.initialization_vector][google.cloud.kms.v1.RawEncryptResponse.initialization_vector]. + InitializationVector []byte `protobuf:"bytes,4,opt,name=initialization_vector,json=initializationVector,proto3" json:"initialization_vector,omitempty"` + // The length of the authentication tag that is appended to the end of + // the ciphertext. If unspecified (0), the default value for the key's + // algorithm will be used (for AES-GCM, the default value is 16). + TagLength int32 `protobuf:"varint,5,opt,name=tag_length,json=tagLength,proto3" json:"tag_length,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawDecryptRequest.ciphertext][google.cloud.kms.v1.RawDecryptRequest.ciphertext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received ciphertext using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that CRC32C(ciphertext) is equal + // to ciphertext_crc32c, and if so, perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,6,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawDecryptRequest.additional_authenticated_data][google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received additional_authenticated_data using + // this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(additional_authenticated_data) is equal to + // additional_authenticated_data_crc32c, and if so, perform + // a limited number of retries. A persistent mismatch may indicate an issue in + // your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. 
+ AdditionalAuthenticatedDataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=additional_authenticated_data_crc32c,json=additionalAuthenticatedDataCrc32c,proto3" json:"additional_authenticated_data_crc32c,omitempty"` + // Optional. An optional CRC32C checksum of the + // [RawDecryptRequest.initialization_vector][google.cloud.kms.v1.RawDecryptRequest.initialization_vector]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received initialization_vector using this + // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C(initialization_vector) is equal to initialization_vector_crc32c, and + // if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + InitializationVectorCrc32C *wrapperspb.Int64Value `protobuf:"bytes,8,opt,name=initialization_vector_crc32c,json=initializationVectorCrc32c,proto3" json:"initialization_vector_crc32c,omitempty"` +} + +func (x *RawDecryptRequest) Reset() { + *x = RawDecryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawDecryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawDecryptRequest) ProtoMessage() {} + +func (x *RawDecryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawDecryptRequest.ProtoReflect.Descriptor instead. 
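(Editorial aside, not part of the vendored file: for the raw AES modes, decryption needs the ciphertext, any AAD, and the IV returned by `RawEncryptResponse`; `tag_length` may stay 0 to use the algorithm's default (16 for AES-GCM). A minimal sketch with hypothetical inputs:)

```
package main

import (
	"fmt"

	"cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	// Hypothetical values; in practice ciphertext and iv come from a prior RawEncryptResponse.
	ciphertext := []byte("...ciphertext with appended tag...")
	iv := []byte("0123456789ab") // a 12-byte IV is typical for AES-GCM

	req := &kmspb.RawDecryptRequest{
		// Hypothetical CryptoKeyVersion used for decryption.
		Name:                 "projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1",
		Ciphertext:           ciphertext,
		InitializationVector: iv,
		// TagLength left as 0: the default for the key's algorithm is used.
	}
	fmt.Println(req.GetName(), req.GetTagLength())
}
```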
+func (*RawDecryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{26} +} + +func (x *RawDecryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RawDecryptRequest) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *RawDecryptRequest) GetAdditionalAuthenticatedData() []byte { + if x != nil { + return x.AdditionalAuthenticatedData + } + return nil +} + +func (x *RawDecryptRequest) GetInitializationVector() []byte { + if x != nil { + return x.InitializationVector + } + return nil +} + +func (x *RawDecryptRequest) GetTagLength() int32 { + if x != nil { + return x.TagLength + } + return 0 +} + +func (x *RawDecryptRequest) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +func (x *RawDecryptRequest) GetAdditionalAuthenticatedDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.AdditionalAuthenticatedDataCrc32C + } + return nil +} + +func (x *RawDecryptRequest) GetInitializationVectorCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.InitializationVectorCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional. The digest of the data to sign. The digest must be produced with + // the same digest algorithm as specified by the key version's + // [algorithm][google.cloud.kms.v1.CryptoKeyVersion.algorithm]. + // + // This field may not be supplied if + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data] + // is supplied. + Digest *Digest `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]) + // is equal to + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DigestCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=digest_crc32c,json=digestCrc32c,proto3" json:"digest_crc32c,omitempty"` + // Optional. The data to sign. 
+ // It can't be supplied if + // [AsymmetricSignRequest.digest][google.cloud.kms.v1.AsymmetricSignRequest.digest] + // is supplied. + Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricSignRequest.data][google.cloud.kms.v1.AsymmetricSignRequest.data]) + // is equal to + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,7,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *AsymmetricSignRequest) Reset() { + *x = AsymmetricSignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricSignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricSignRequest) ProtoMessage() {} + +func (x *AsymmetricSignRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricSignRequest.ProtoReflect.Descriptor instead. +func (*AsymmetricSignRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{27} +} + +func (x *AsymmetricSignRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricSignRequest) GetDigest() *Digest { + if x != nil { + return x.Digest + } + return nil +} + +func (x *AsymmetricSignRequest) GetDigestCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DigestCrc32C + } + return nil +} + +func (x *AsymmetricSignRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *AsymmetricSignRequest) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // decryption. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data encrypted with the named + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s public key using + // OAEP. + Ciphertext []byte `protobuf:"bytes,3,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Optional. An optional CRC32C checksum of the + // [AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]. + // If specified, + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // verify the integrity of the received + // [AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext] + // using this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([AsymmetricDecryptRequest.ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]) + // is equal to + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` +} + +func (x *AsymmetricDecryptRequest) Reset() { + *x = AsymmetricDecryptRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricDecryptRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricDecryptRequest) ProtoMessage() {} + +func (x *AsymmetricDecryptRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricDecryptRequest.ProtoReflect.Descriptor instead. +func (*AsymmetricDecryptRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{28} +} + +func (x *AsymmetricDecryptRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricDecryptRequest) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *AsymmetricDecryptRequest) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +type MacSignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // signing. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data to sign. The MAC tag is computed over this data field + // based on the specific algorithm. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Optional. An optional CRC32C checksum of the + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data]. If + // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will verify the integrity of the received + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data] using this + // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data]) is + // equal to + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *MacSignRequest) Reset() { + *x = MacSignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacSignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacSignRequest) ProtoMessage() {} + +func (x *MacSignRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacSignRequest.ProtoReflect.Descriptor instead. +func (*MacSignRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{29} +} + +func (x *MacSignRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacSignRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *MacSignRequest) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +type MacVerifyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] to use for + // verification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The data used previously as a + // [MacSignRequest.data][google.cloud.kms.v1.MacSignRequest.data] to generate + // the MAC tag. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Optional. 
An optional CRC32C checksum of the + // [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data]. If + // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will verify the integrity of the received + // [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data] using + // this checksum. + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] will + // report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data]) + // is equal to + // [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` + // Required. The signature to verify. + Mac []byte `protobuf:"bytes,4,opt,name=mac,proto3" json:"mac,omitempty"` + // Optional. An optional CRC32C checksum of the + // [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]. If + // specified, [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will verify the integrity of the received + // [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac] using this + // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] + // will report an error if the checksum verification fails. If you receive a + // checksum error, your client should verify that + // CRC32C([MacVerifyRequest.tag][]) is equal to + // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c], + // and if so, perform a limited number of retries. A persistent mismatch may + // indicate an issue in your computation of the CRC32C checksum. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + MacCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=mac_crc32c,json=macCrc32c,proto3" json:"mac_crc32c,omitempty"` +} + +func (x *MacVerifyRequest) Reset() { + *x = MacVerifyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacVerifyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacVerifyRequest) ProtoMessage() {} + +func (x *MacVerifyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacVerifyRequest.ProtoReflect.Descriptor instead. 
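The request messages above (MacSignRequest, MacVerifyRequest, and the earlier encrypt/decrypt requests) all document the same client-side convention: compute CRC32C over the bytes being sent, wrap the result in an Int64Value, and let the service re-verify it and report back. Below is a minimal sketch of that convention, not part of the generated file, assuming the cloud.google.com/go/kms/apiv1/kmspb import path vendored by this patch; the key version name is a placeholder.

```go
package main

import (
	"fmt"
	"hash/crc32"

	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// crc32c computes the CRC32C (Castagnoli) checksum the comments above refer
// to. The result is a uint32, which is why the proto fields are documented as
// int64 values that never exceed 2^32-1.
func crc32c(b []byte) uint32 {
	return crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
}

func main() {
	data := []byte("payload to authenticate")

	// Hypothetical key version name; data_crc32c lets the service confirm it
	// received exactly the bytes the client intended to send.
	req := &kmspb.MacSignRequest{
		Name:       "projects/p/locations/l/keyRings/r/cryptoKeys/k/cryptoKeyVersions/1",
		Data:       data,
		DataCrc32C: wrapperspb.Int64(int64(crc32c(data))),
	}
	fmt.Println(req.GetDataCrc32C().GetValue())
}
```

Since CRC32C values fit in a uint32, the int64 conversion above cannot overflow, which is what the repeated "2^32-1" note in these comments is getting at.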
+func (*MacVerifyRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{30} +} + +func (x *MacVerifyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacVerifyRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *MacVerifyRequest) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +func (x *MacVerifyRequest) GetMac() []byte { + if x != nil { + return x.Mac + } + return nil +} + +func (x *MacVerifyRequest) GetMacCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.MacCrc32C + } + return nil +} + +// Request message for +// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes]. +type GenerateRandomBytesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The project-specific location in which to generate random bytes. + // For example, "projects/my-project/locations/us-central1". + Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` + // The length in bytes of the amount of randomness to retrieve. Minimum 8 + // bytes, maximum 1024 bytes. + LengthBytes int32 `protobuf:"varint,2,opt,name=length_bytes,json=lengthBytes,proto3" json:"length_bytes,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] to use when + // generating the random data. Currently, only + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] protection level is + // supported. + ProtectionLevel ProtectionLevel `protobuf:"varint,3,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *GenerateRandomBytesRequest) Reset() { + *x = GenerateRandomBytesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateRandomBytesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateRandomBytesRequest) ProtoMessage() {} + +func (x *GenerateRandomBytesRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateRandomBytesRequest.ProtoReflect.Descriptor instead. +func (*GenerateRandomBytesRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{31} +} + +func (x *GenerateRandomBytesRequest) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *GenerateRandomBytesRequest) GetLengthBytes() int32 { + if x != nil { + return x.LengthBytes + } + return 0 +} + +func (x *GenerateRandomBytesRequest) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. 
+type EncryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. Check this field to verify that the intended resource was used + // for encryption. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The encrypted data. + Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext]. + // An integrity check of + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext] + // can be performed by computing the CRC32C checksum of + // [EncryptResponse.ciphertext][google.cloud.kms.v1.EncryptResponse.ciphertext] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. A false value of + // this field indicates either that + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [EncryptRequest.plaintext_crc32c][google.cloud.kms.v1.EncryptRequest.plaintext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedPlaintextCrc32C bool `protobuf:"varint,5,opt,name=verified_plaintext_crc32c,json=verifiedPlaintextCrc32c,proto3" json:"verified_plaintext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [AAD][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data]. A + // false value of this field indicates either that + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. 
If you've + // set + // [EncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedAdditionalAuthenticatedDataCrc32C bool `protobuf:"varint,6,opt,name=verified_additional_authenticated_data_crc32c,json=verifiedAdditionalAuthenticatedDataCrc32c,proto3" json:"verified_additional_authenticated_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,7,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *EncryptResponse) Reset() { + *x = EncryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EncryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncryptResponse) ProtoMessage() {} + +func (x *EncryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncryptResponse.ProtoReflect.Descriptor instead. +func (*EncryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{32} +} + +func (x *EncryptResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EncryptResponse) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *EncryptResponse) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +func (x *EncryptResponse) GetVerifiedPlaintextCrc32C() bool { + if x != nil { + return x.VerifiedPlaintextCrc32C + } + return false +} + +func (x *EncryptResponse) GetVerifiedAdditionalAuthenticatedDataCrc32C() bool { + if x != nil { + return x.VerifiedAdditionalAuthenticatedDataCrc32C + } + return false +} + +func (x *EncryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. +type DecryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The decrypted data originally supplied in + // [EncryptRequest.plaintext][google.cloud.kms.v1.EncryptRequest.plaintext]. + Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext]. + // An integrity check of + // [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext] + // can be performed by computing the CRC32C checksum of + // [DecryptResponse.plaintext][google.cloud.kms.v1.DecryptResponse.plaintext] + // and comparing your results to this field. 
Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: receiving this response message indicates that + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] is able to + // successfully decrypt the + // [ciphertext][google.cloud.kms.v1.DecryptRequest.ciphertext]. Note: This + // field is defined as int64 for reasons of compatibility across different + // languages. However, it is a non-negative integer, which will never exceed + // 2^32-1, and can be safely downconverted to uint32 in languages that support + // this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Whether the Decryption was performed using the primary key version. + UsedPrimary bool `protobuf:"varint,3,opt,name=used_primary,json=usedPrimary,proto3" json:"used_primary,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // decryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,4,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *DecryptResponse) Reset() { + *x = DecryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DecryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DecryptResponse) ProtoMessage() {} + +func (x *DecryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DecryptResponse.ProtoReflect.Descriptor instead. +func (*DecryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{33} +} + +func (x *DecryptResponse) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *DecryptResponse) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *DecryptResponse) GetUsedPrimary() bool { + if x != nil { + return x.UsedPrimary + } + return false +} + +func (x *DecryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.RawEncrypt][google.cloud.kms.v1.KeyManagementService.RawEncrypt]. +type RawEncryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encrypted data. In the case of AES-GCM, the authentication tag + // is the [tag_length][google.cloud.kms.v1.RawEncryptResponse.tag_length] + // bytes at the end of this field. + Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` + // The initialization vector (IV) generated by the service during + // encryption. 
This value must be stored and provided in + // [RawDecryptRequest.initialization_vector][google.cloud.kms.v1.RawDecryptRequest.initialization_vector] + // at decryption time. + InitializationVector []byte `protobuf:"bytes,2,opt,name=initialization_vector,json=initializationVector,proto3" json:"initialization_vector,omitempty"` + // The length of the authentication tag that is appended to + // the end of the ciphertext. + TagLength int32 `protobuf:"varint,3,opt,name=tag_length,json=tagLength,proto3" json:"tag_length,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [RawEncryptResponse.ciphertext][google.cloud.kms.v1.RawEncryptResponse.ciphertext]. + // An integrity check of ciphertext can be performed by computing the CRC32C + // checksum of ciphertext and comparing your results to this field. Discard + // the response in case of non-matching checksum values, and perform a limited + // number of retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. Note: This field is defined as int64 + // for reasons of compatibility across different languages. However, it is a + // non-negative integer, which will never exceed 2^32-1, and can be safely + // downconverted to uint32 in languages that support this type. + CiphertextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,4,opt,name=ciphertext_crc32c,json=ciphertextCrc32c,proto3" json:"ciphertext_crc32c,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [RawEncryptResponse.initialization_vector][google.cloud.kms.v1.RawEncryptResponse.initialization_vector]. + // An integrity check of initialization_vector can be performed by computing + // the CRC32C checksum of initialization_vector and comparing your results to + // this field. Discard the response in case of non-matching checksum values, + // and perform a limited number of retries. A persistent mismatch may indicate + // an issue in your computation of the CRC32C checksum. Note: This field is + // defined as int64 for reasons of compatibility across different languages. + // However, it is a non-negative integer, which will never exceed 2^32-1, and + // can be safely downconverted to uint32 in languages that support this type. + InitializationVectorCrc32C *wrapperspb.Int64Value `protobuf:"bytes,5,opt,name=initialization_vector_crc32c,json=initializationVectorCrc32c,proto3" json:"initialization_vector_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [RawEncryptRequest.plaintext_crc32c][google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the plaintext. A false value of this + // field indicates either that + // [RawEncryptRequest.plaintext_crc32c][google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawEncryptRequest.plaintext_crc32c][google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedPlaintextCrc32C bool `protobuf:"varint,6,opt,name=verified_plaintext_crc32c,json=verifiedPlaintextCrc32c,proto3" json:"verified_plaintext_crc32c,omitempty"` + // Integrity verification field. 
A flag indicating whether + // [RawEncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of additional_authenticated_data. A false + // value of this field indicates either that // + // [RawEncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawEncryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedAdditionalAuthenticatedDataCrc32C bool `protobuf:"varint,7,opt,name=verified_additional_authenticated_data_crc32c,json=verifiedAdditionalAuthenticatedDataCrc32c,proto3" json:"verified_additional_authenticated_data_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [RawEncryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of initialization_vector. A false value of + // this field indicates either that + // [RawEncryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawEncryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedInitializationVectorCrc32C bool `protobuf:"varint,10,opt,name=verified_initialization_vector_crc32c,json=verifiedInitializationVectorCrc32c,proto3" json:"verified_initialization_vector_crc32c,omitempty"` + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. Check this field to verify that the intended resource was used + // for encryption. + Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // encryption. 
+ ProtectionLevel ProtectionLevel `protobuf:"varint,9,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *RawEncryptResponse) Reset() { + *x = RawEncryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawEncryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawEncryptResponse) ProtoMessage() {} + +func (x *RawEncryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawEncryptResponse.ProtoReflect.Descriptor instead. +func (*RawEncryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{34} +} + +func (x *RawEncryptResponse) GetCiphertext() []byte { + if x != nil { + return x.Ciphertext + } + return nil +} + +func (x *RawEncryptResponse) GetInitializationVector() []byte { + if x != nil { + return x.InitializationVector + } + return nil +} + +func (x *RawEncryptResponse) GetTagLength() int32 { + if x != nil { + return x.TagLength + } + return 0 +} + +func (x *RawEncryptResponse) GetCiphertextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.CiphertextCrc32C + } + return nil +} + +func (x *RawEncryptResponse) GetInitializationVectorCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.InitializationVectorCrc32C + } + return nil +} + +func (x *RawEncryptResponse) GetVerifiedPlaintextCrc32C() bool { + if x != nil { + return x.VerifiedPlaintextCrc32C + } + return false +} + +func (x *RawEncryptResponse) GetVerifiedAdditionalAuthenticatedDataCrc32C() bool { + if x != nil { + return x.VerifiedAdditionalAuthenticatedDataCrc32C + } + return false +} + +func (x *RawEncryptResponse) GetVerifiedInitializationVectorCrc32C() bool { + if x != nil { + return x.VerifiedInitializationVectorCrc32C + } + return false +} + +func (x *RawEncryptResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *RawEncryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.RawDecrypt][google.cloud.kms.v1.KeyManagementService.RawDecrypt]. +type RawDecryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The decrypted data. + Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [RawDecryptResponse.plaintext][google.cloud.kms.v1.RawDecryptResponse.plaintext]. + // An integrity check of plaintext can be performed by computing the CRC32C + // checksum of plaintext and comparing your results to this field. Discard the + // response in case of non-matching checksum values, and perform a limited + // number of retries. A persistent mismatch may indicate an issue in your + // computation of the CRC32C checksum. 
Note: receiving this response message + // indicates that + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] is able to + // successfully decrypt the + // [ciphertext][google.cloud.kms.v1.RawDecryptRequest.ciphertext]. + // Note: This field is defined as int64 for reasons of compatibility across + // different languages. However, it is a non-negative integer, which will + // never exceed 2^32-1, and can be safely downconverted to uint32 in languages + // that support this type. + PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // decryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,3,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` + // Integrity verification field. A flag indicating whether + // [RawDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the ciphertext. A false value of this + // field indicates either that + // [RawDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedCiphertextCrc32C bool `protobuf:"varint,4,opt,name=verified_ciphertext_crc32c,json=verifiedCiphertextCrc32c,proto3" json:"verified_ciphertext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [RawDecryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of additional_authenticated_data. A false + // value of this field indicates either that // + // [RawDecryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawDecryptRequest.additional_authenticated_data_crc32c][google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedAdditionalAuthenticatedDataCrc32C bool `protobuf:"varint,5,opt,name=verified_additional_authenticated_data_crc32c,json=verifiedAdditionalAuthenticatedDataCrc32c,proto3" json:"verified_additional_authenticated_data_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [RawDecryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of initialization_vector. 
A false value of + // this field indicates either that + // [RawDecryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [RawDecryptRequest.initialization_vector_crc32c][google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedInitializationVectorCrc32C bool `protobuf:"varint,6,opt,name=verified_initialization_vector_crc32c,json=verifiedInitializationVectorCrc32c,proto3" json:"verified_initialization_vector_crc32c,omitempty"` +} + +func (x *RawDecryptResponse) Reset() { + *x = RawDecryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RawDecryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawDecryptResponse) ProtoMessage() {} + +func (x *RawDecryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawDecryptResponse.ProtoReflect.Descriptor instead. +func (*RawDecryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{35} +} + +func (x *RawDecryptResponse) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *RawDecryptResponse) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *RawDecryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +func (x *RawDecryptResponse) GetVerifiedCiphertextCrc32C() bool { + if x != nil { + return x.VerifiedCiphertextCrc32C + } + return false +} + +func (x *RawDecryptResponse) GetVerifiedAdditionalAuthenticatedDataCrc32C() bool { + if x != nil { + return x.VerifiedAdditionalAuthenticatedDataCrc32C + } + return false +} + +func (x *RawDecryptResponse) GetVerifiedInitializationVectorCrc32C() bool { + if x != nil { + return x.VerifiedInitializationVectorCrc32C + } + return false +} + +// Response message for +// [KeyManagementService.AsymmetricSign][google.cloud.kms.v1.KeyManagementService.AsymmetricSign]. +type AsymmetricSignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The created signature. + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature]. + // An integrity check of + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature] + // can be performed by computing the CRC32C checksum of + // [AsymmetricSignResponse.signature][google.cloud.kms.v1.AsymmetricSignResponse.signature] + // and comparing your results to this field. 
Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + SignatureCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=signature_crc32c,json=signatureCrc32c,proto3" json:"signature_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [digest][google.cloud.kms.v1.AsymmetricSignRequest.digest]. A false value + // of this field indicates either that + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricSignRequest.digest_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDigestCrc32C bool `protobuf:"varint,3,opt,name=verified_digest_crc32c,json=verifiedDigestCrc32c,proto3" json:"verified_digest_crc32c,omitempty"` + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + // Check this field to verify that the intended resource was used for signing. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.AsymmetricSignRequest.data]. A false value of + // this field indicates either that + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricSignRequest.data_crc32c][google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDataCrc32C bool `protobuf:"varint,5,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. 
+ ProtectionLevel ProtectionLevel `protobuf:"varint,6,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *AsymmetricSignResponse) Reset() { + *x = AsymmetricSignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricSignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricSignResponse) ProtoMessage() {} + +func (x *AsymmetricSignResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricSignResponse.ProtoReflect.Descriptor instead. +func (*AsymmetricSignResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{36} +} + +func (x *AsymmetricSignResponse) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *AsymmetricSignResponse) GetSignatureCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.SignatureCrc32C + } + return nil +} + +func (x *AsymmetricSignResponse) GetVerifiedDigestCrc32C() bool { + if x != nil { + return x.VerifiedDigestCrc32C + } + return false +} + +func (x *AsymmetricSignResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AsymmetricSignResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *AsymmetricSignResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.AsymmetricDecrypt][google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt]. +type AsymmetricDecryptResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The decrypted data originally encrypted with the matching public key. + Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext]. + // An integrity check of + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext] + // can be performed by computing the CRC32C checksum of + // [AsymmetricDecryptResponse.plaintext][google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext] + // and comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. 
+ PlaintextCrc32C *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=plaintext_crc32c,json=plaintextCrc32c,proto3" json:"plaintext_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [ciphertext][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext]. A + // false value of this field indicates either that + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [AsymmetricDecryptRequest.ciphertext_crc32c][google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedCiphertextCrc32C bool `protobuf:"varint,3,opt,name=verified_ciphertext_crc32c,json=verifiedCiphertextCrc32c,proto3" json:"verified_ciphertext_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used in + // decryption. + ProtectionLevel ProtectionLevel `protobuf:"varint,4,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *AsymmetricDecryptResponse) Reset() { + *x = AsymmetricDecryptResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AsymmetricDecryptResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AsymmetricDecryptResponse) ProtoMessage() {} + +func (x *AsymmetricDecryptResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AsymmetricDecryptResponse.ProtoReflect.Descriptor instead. +func (*AsymmetricDecryptResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{37} +} + +func (x *AsymmetricDecryptResponse) GetPlaintext() []byte { + if x != nil { + return x.Plaintext + } + return nil +} + +func (x *AsymmetricDecryptResponse) GetPlaintextCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.PlaintextCrc32C + } + return nil +} + +func (x *AsymmetricDecryptResponse) GetVerifiedCiphertextCrc32C() bool { + if x != nil { + return x.VerifiedCiphertextCrc32C + } + return false +} + +func (x *AsymmetricDecryptResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.MacSign][google.cloud.kms.v1.KeyManagementService.MacSign]. +type MacSignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. 
+ // Check this field to verify that the intended resource was used for signing. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The created signature. + Mac []byte `protobuf:"bytes,2,opt,name=mac,proto3" json:"mac,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac]. An + // integrity check of + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac] can be + // performed by computing the CRC32C checksum of + // [MacSignResponse.mac][google.cloud.kms.v1.MacSignResponse.mac] and + // comparing your results to this field. Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + MacCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=mac_crc32c,json=macCrc32c,proto3" json:"mac_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.MacSignRequest.data]. A false value of this + // field indicates either that + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [MacSignRequest.data_crc32c][google.cloud.kms.v1.MacSignRequest.data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDataCrc32C bool `protobuf:"varint,4,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for signing. + ProtectionLevel ProtectionLevel `protobuf:"varint,5,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *MacSignResponse) Reset() { + *x = MacSignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacSignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacSignResponse) ProtoMessage() {} + +func (x *MacSignResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacSignResponse.ProtoReflect.Descriptor instead. 
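On the response side, the comments describe the mirror-image check: confirm the verified_* flag for any checksum you sent, recompute CRC32C over the returned bytes, compare it to the corresponding *_crc32c field, and discard the response (retrying a limited number of times) on mismatch. A sketch of that check for MacSignResponse follows, not part of the generated file; checkMacSignResponse and the synthetic response in main are illustrative only.

```go
package main

import (
	"errors"
	"fmt"
	"hash/crc32"

	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// checkMacSignResponse reports whether the response should be discarded (and
// the call retried a limited number of times), following the
// integrity-verification guidance in the field comments: the server must
// acknowledge the request checksum that was sent, and the returned MAC must
// match mac_crc32c.
func checkMacSignResponse(resp *kmspb.MacSignResponse, sentDataCrc32C bool) error {
	if sentDataCrc32C && !resp.GetVerifiedDataCrc32C() {
		return errors.New("server did not verify MacSignRequest.data_crc32c")
	}
	if got := int64(crc32.Checksum(resp.GetMac(), castagnoli)); got != resp.GetMacCrc32C().GetValue() {
		return errors.New("MacSignResponse.mac failed CRC32C verification")
	}
	return nil
}

func main() {
	// Synthetic response standing in for the result of a real MacSign call.
	mac := []byte("example-mac-bytes")
	resp := &kmspb.MacSignResponse{
		Mac:                mac,
		MacCrc32C:          wrapperspb.Int64(int64(crc32.Checksum(mac, castagnoli))),
		VerifiedDataCrc32C: true,
	}
	fmt.Println(checkMacSignResponse(resp, true)) // prints <nil> when the checks pass
}
```

The same pattern applies to EncryptResponse.ciphertext_crc32c, DecryptResponse.plaintext_crc32c, and the Raw* responses shown above.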
+func (*MacSignResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{38} +} + +func (x *MacSignResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacSignResponse) GetMac() []byte { + if x != nil { + return x.Mac + } + return nil +} + +func (x *MacSignResponse) GetMacCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.MacCrc32C + } + return nil +} + +func (x *MacSignResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *MacSignResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.MacVerify][google.cloud.kms.v1.KeyManagementService.MacVerify]. +type MacVerifyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource name of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for + // verification. Check this field to verify that the intended resource was + // used for verification. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field indicates whether or not the verification operation for + // [MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac] over + // [MacVerifyRequest.data][google.cloud.kms.v1.MacVerifyRequest.data] was + // successful. + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + // Integrity verification field. A flag indicating whether + // [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.MacVerifyRequest.data]. A false value of this + // field indicates either that + // [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [MacVerifyRequest.data_crc32c][google.cloud.kms.v1.MacVerifyRequest.data_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. + VerifiedDataCrc32C bool `protobuf:"varint,3,opt,name=verified_data_crc32c,json=verifiedDataCrc32c,proto3" json:"verified_data_crc32c,omitempty"` + // Integrity verification field. A flag indicating whether + // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c] + // was received by + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService] and used + // for the integrity verification of the + // [data][google.cloud.kms.v1.MacVerifyRequest.mac]. A false value of this + // field indicates either that + // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c] + // was left unset or that it was not delivered to + // [KeyManagementService][google.cloud.kms.v1.KeyManagementService]. If you've + // set + // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c] + // but this field is still false, discard the response and perform a limited + // number of retries. 
+ VerifiedMacCrc32C bool `protobuf:"varint,4,opt,name=verified_mac_crc32c,json=verifiedMacCrc32c,proto3" json:"verified_mac_crc32c,omitempty"` + // Integrity verification field. This value is used for the integrity + // verification of [MacVerifyResponse.success]. If the value of this field + // contradicts the value of [MacVerifyResponse.success], discard the response + // and perform a limited number of retries. + VerifiedSuccessIntegrity bool `protobuf:"varint,5,opt,name=verified_success_integrity,json=verifiedSuccessIntegrity,proto3" json:"verified_success_integrity,omitempty"` + // The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] used for + // verification. + ProtectionLevel ProtectionLevel `protobuf:"varint,6,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"` +} + +func (x *MacVerifyResponse) Reset() { + *x = MacVerifyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MacVerifyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MacVerifyResponse) ProtoMessage() {} + +func (x *MacVerifyResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MacVerifyResponse.ProtoReflect.Descriptor instead. +func (*MacVerifyResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{39} +} + +func (x *MacVerifyResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MacVerifyResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedDataCrc32C() bool { + if x != nil { + return x.VerifiedDataCrc32C + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedMacCrc32C() bool { + if x != nil { + return x.VerifiedMacCrc32C + } + return false +} + +func (x *MacVerifyResponse) GetVerifiedSuccessIntegrity() bool { + if x != nil { + return x.VerifiedSuccessIntegrity + } + return false +} + +func (x *MacVerifyResponse) GetProtectionLevel() ProtectionLevel { + if x != nil { + return x.ProtectionLevel + } + return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED +} + +// Response message for +// [KeyManagementService.GenerateRandomBytes][google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes]. +type GenerateRandomBytesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The generated data. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Integrity verification field. A CRC32C checksum of the returned + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data]. + // An integrity check of + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data] + // can be performed by computing the CRC32C checksum of + // [GenerateRandomBytesResponse.data][google.cloud.kms.v1.GenerateRandomBytesResponse.data] + // and comparing your results to this field. 
Discard the response in case of + // non-matching checksum values, and perform a limited number of retries. A + // persistent mismatch may indicate an issue in your computation of the CRC32C + // checksum. Note: This field is defined as int64 for reasons of compatibility + // across different languages. However, it is a non-negative integer, which + // will never exceed 2^32-1, and can be safely downconverted to uint32 in + // languages that support this type. + DataCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=data_crc32c,json=dataCrc32c,proto3" json:"data_crc32c,omitempty"` +} + +func (x *GenerateRandomBytesResponse) Reset() { + *x = GenerateRandomBytesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateRandomBytesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateRandomBytesResponse) ProtoMessage() {} + +func (x *GenerateRandomBytesResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateRandomBytesResponse.ProtoReflect.Descriptor instead. +func (*GenerateRandomBytesResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{40} +} + +func (x *GenerateRandomBytesResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *GenerateRandomBytesResponse) GetDataCrc32C() *wrapperspb.Int64Value { + if x != nil { + return x.DataCrc32C + } + return nil +} + +// A [Digest][google.cloud.kms.v1.Digest] holds a cryptographic message digest. +type Digest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The message digest. + // + // Types that are assignable to Digest: + // + // *Digest_Sha256 + // *Digest_Sha384 + // *Digest_Sha512 + Digest isDigest_Digest `protobuf_oneof:"digest"` +} + +func (x *Digest) Reset() { + *x = Digest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Digest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Digest) ProtoMessage() {} + +func (x *Digest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Digest.ProtoReflect.Descriptor instead. 
+func (*Digest) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{41} +} + +func (m *Digest) GetDigest() isDigest_Digest { + if m != nil { + return m.Digest + } + return nil +} + +func (x *Digest) GetSha256() []byte { + if x, ok := x.GetDigest().(*Digest_Sha256); ok { + return x.Sha256 + } + return nil +} + +func (x *Digest) GetSha384() []byte { + if x, ok := x.GetDigest().(*Digest_Sha384); ok { + return x.Sha384 + } + return nil +} + +func (x *Digest) GetSha512() []byte { + if x, ok := x.GetDigest().(*Digest_Sha512); ok { + return x.Sha512 + } + return nil +} + +type isDigest_Digest interface { + isDigest_Digest() +} + +type Digest_Sha256 struct { + // A message digest produced with the SHA-256 algorithm. + Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3,oneof"` +} + +type Digest_Sha384 struct { + // A message digest produced with the SHA-384 algorithm. + Sha384 []byte `protobuf:"bytes,2,opt,name=sha384,proto3,oneof"` +} + +type Digest_Sha512 struct { + // A message digest produced with the SHA-512 algorithm. + Sha512 []byte `protobuf:"bytes,3,opt,name=sha512,proto3,oneof"` +} + +func (*Digest_Sha256) isDigest_Digest() {} + +func (*Digest_Sha384) isDigest_Digest() {} + +func (*Digest_Sha512) isDigest_Digest() {} + +// Cloud KMS metadata for the given +// [google.cloud.location.Location][google.cloud.location.Location]. +type LocationMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates whether [CryptoKeys][google.cloud.kms.v1.CryptoKey] with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // [HSM][google.cloud.kms.v1.ProtectionLevel.HSM] can be created in this + // location. + HsmAvailable bool `protobuf:"varint,1,opt,name=hsm_available,json=hsmAvailable,proto3" json:"hsm_available,omitempty"` + // Indicates whether [CryptoKeys][google.cloud.kms.v1.CryptoKey] with + // [protection_level][google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level] + // [EXTERNAL][google.cloud.kms.v1.ProtectionLevel.EXTERNAL] can be created in + // this location. + EkmAvailable bool `protobuf:"varint,2,opt,name=ekm_available,json=ekmAvailable,proto3" json:"ekm_available,omitempty"` +} + +func (x *LocationMetadata) Reset() { + *x = LocationMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocationMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocationMetadata) ProtoMessage() {} + +func (x *LocationMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocationMetadata.ProtoReflect.Descriptor instead. 
+func (*LocationMetadata) Descriptor() ([]byte, []int) { + return file_google_cloud_kms_v1_service_proto_rawDescGZIP(), []int{42} +} + +func (x *LocationMetadata) GetHsmAvailable() bool { + if x != nil { + return x.HsmAvailable + } + return false +} + +func (x *LocationMetadata) GetEkmAvailable() bool { + if x != nil { + return x.EkmAvailable + } + return false +} + +var File_google_cloud_kms_v1_service_proto protoreflect.FileDescriptor + +var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, + 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xdb, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, + 0x22, 
0xba, 0x02, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x5d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x69, 0x65, + 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, + 0x69, 0x65, 0x77, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, + 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, + 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x22, 0xb4, 0x02, + 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4e, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, + 0x77, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x42, 0x79, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, + 0x42, 0x79, 0x22, 0x98, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x6b, + 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa0, 0x01, + 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 
0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x22, 0xbd, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x55, 0x0a, 0x13, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x22, 0xa0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, + 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x26, 0x0a, 0x0f, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, + 0x69, 0x7a, 0x65, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x47, + 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 
0x41, 0x2a, 0x0a, + 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x5b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x23, + 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x22, 0x89, 0x02, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, + 0x0d, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, + 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbe, 0x03, 0x0a, + 0x1d, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x5e, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x01, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 
0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6a, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x0a, 0x0b, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, + 0x34, 0x0a, 0x13, 0x72, 0x73, 0x61, 0x5f, 0x61, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x10, 0x72, 0x73, 0x61, 0x41, 0x65, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x64, 0x4b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0xc6, 0x01, + 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, + 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0d, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, + 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbb, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x9d, 0x01, 0x0a, 0x24, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, + 0x15, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x1e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x66, 0x0a, + 0x1e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xdb, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 
0x41, 0x02, 0x52, + 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x22, 0xff, 0x02, 0x0a, 0x0e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, + 0x61, 
0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xf6, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a, + 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x12, 0x38, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x62, 0x0a, 0x1c, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x56, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x99, + 0x04, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x15, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x67, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, + 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x62, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xba, 0x02, 0x0a, 0x15, 0x41, + 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xd4, 0x01, 0x0a, 0x18, 0x41, 0x73, 0x79, 0x6d, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, + 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xb2, + 0x01, 0x0a, 0x0e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, + 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 
0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, + 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, + 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03, 0x6d, 0x61, + 0x63, 0x12, 0x3f, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x22, 0xac, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, + 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, + 0x0c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, + 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, + 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, + 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x22, 0xeb, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x21, 0x0a, 0x0c, + 0x75, 0x73, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, + 0x0f, 0x70, 0x72, 0x6f, 0x74, 
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x22, 0x87, 0x05, 0x0a, 0x12, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x33, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x09, 0x74, 0x61, 0x67, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x48, 0x0a, 0x11, 0x63, + 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x5d, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 
0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xbe, 0x03, 0x0a, 0x12, 0x52, + 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, + 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x3c, 0x0a, 0x1a, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, + 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, + 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xcb, 0x02, 0x0a, 0x16, + 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 
0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x34, 0x0a, 0x16, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, + 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x90, 0x02, 0x0a, 0x19, 0x41, 0x73, + 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a, + 0x1a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xf6, 0x01, 0x0a, + 0x0f, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, + 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x4d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a, 0x1a, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x6f, 0x0a, 0x1b, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a, + 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x60, 0x0a, 0x06, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12, + 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, + 0x35, 0x31, 0x32, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x35, 0x31, 0x32, 0x42, 0x08, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x5c, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x73, 0x6d, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x73, 0x6d, 0x41, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6b, 0x6d, 0x5f, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x65, + 0x6b, 0x6d, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x32, 0x90, 0x2e, 0x0a, 0x14, + 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 
0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, + 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, + 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x8f, 0x01, 0x0a, 0x0a, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x22, + 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 
0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, + 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, + 0x0c, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, + 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x5c, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f, + 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0xc0, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4a, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x48, 0xda, + 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, + 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x22, 0x5c, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, + 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, + 0x6e, 0x67, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x0a, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, + 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0xfb, 0x01, 
0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0xda, 0x41, 0x19, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x63, 0x3a, 0x12, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0xd4, 0x01, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x01, + 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x3a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, + 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 
0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, + 0x64, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x47, 0x3a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x22, 0x39, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x0f, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0xda, 0x41, 0x16, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52, 0x3a, 0x0a, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x02, + 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x3a, 0x12, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x32, 0x60, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 
0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x22, 0x76, 0xda, 0x41, 0x1a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x01, 0x2a, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x73, + 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x72, + 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x3a, + 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, + 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 
0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, + 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, + 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d, + 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x77, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, + 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, + 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5d, + 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, + 0x0a, 0x0a, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x5d, 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75, + 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x61, 0x3a, 0x01, 0x2a, 0x22, 0x5c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xf0, 0x01, 0x0a, 0x11, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0xda, 0x41, 0x0f, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x64, 0x3a, 0x01, 0x2a, 0x22, 0x5f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x63, + 0x53, 0x69, 0x67, 0x6e, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, + 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x6c, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xce, 0x01, + 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x72, 0xda, 0x41, 0x0d, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x6d, 0x61, 0x63, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x5c, 0x3a, 0x01, 0x2a, 0x22, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0xe7, + 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0xda, 0x41, 0x26, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, + 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7f, + 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_cloud_kms_v1_service_proto_rawDescOnce sync.Once + file_google_cloud_kms_v1_service_proto_rawDescData = file_google_cloud_kms_v1_service_proto_rawDesc +) + +func file_google_cloud_kms_v1_service_proto_rawDescGZIP() []byte { + file_google_cloud_kms_v1_service_proto_rawDescOnce.Do(func() { + file_google_cloud_kms_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_service_proto_rawDescData) + }) + 
return file_google_cloud_kms_v1_service_proto_rawDescData +} + +var file_google_cloud_kms_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 43) +var file_google_cloud_kms_v1_service_proto_goTypes = []any{ + (*ListKeyRingsRequest)(nil), // 0: google.cloud.kms.v1.ListKeyRingsRequest + (*ListCryptoKeysRequest)(nil), // 1: google.cloud.kms.v1.ListCryptoKeysRequest + (*ListCryptoKeyVersionsRequest)(nil), // 2: google.cloud.kms.v1.ListCryptoKeyVersionsRequest + (*ListImportJobsRequest)(nil), // 3: google.cloud.kms.v1.ListImportJobsRequest + (*ListKeyRingsResponse)(nil), // 4: google.cloud.kms.v1.ListKeyRingsResponse + (*ListCryptoKeysResponse)(nil), // 5: google.cloud.kms.v1.ListCryptoKeysResponse + (*ListCryptoKeyVersionsResponse)(nil), // 6: google.cloud.kms.v1.ListCryptoKeyVersionsResponse + (*ListImportJobsResponse)(nil), // 7: google.cloud.kms.v1.ListImportJobsResponse + (*GetKeyRingRequest)(nil), // 8: google.cloud.kms.v1.GetKeyRingRequest + (*GetCryptoKeyRequest)(nil), // 9: google.cloud.kms.v1.GetCryptoKeyRequest + (*GetCryptoKeyVersionRequest)(nil), // 10: google.cloud.kms.v1.GetCryptoKeyVersionRequest + (*GetPublicKeyRequest)(nil), // 11: google.cloud.kms.v1.GetPublicKeyRequest + (*GetImportJobRequest)(nil), // 12: google.cloud.kms.v1.GetImportJobRequest + (*CreateKeyRingRequest)(nil), // 13: google.cloud.kms.v1.CreateKeyRingRequest + (*CreateCryptoKeyRequest)(nil), // 14: google.cloud.kms.v1.CreateCryptoKeyRequest + (*CreateCryptoKeyVersionRequest)(nil), // 15: google.cloud.kms.v1.CreateCryptoKeyVersionRequest + (*ImportCryptoKeyVersionRequest)(nil), // 16: google.cloud.kms.v1.ImportCryptoKeyVersionRequest + (*CreateImportJobRequest)(nil), // 17: google.cloud.kms.v1.CreateImportJobRequest + (*UpdateCryptoKeyRequest)(nil), // 18: google.cloud.kms.v1.UpdateCryptoKeyRequest + (*UpdateCryptoKeyVersionRequest)(nil), // 19: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest + (*UpdateCryptoKeyPrimaryVersionRequest)(nil), // 20: google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest + (*DestroyCryptoKeyVersionRequest)(nil), // 21: google.cloud.kms.v1.DestroyCryptoKeyVersionRequest + (*RestoreCryptoKeyVersionRequest)(nil), // 22: google.cloud.kms.v1.RestoreCryptoKeyVersionRequest + (*EncryptRequest)(nil), // 23: google.cloud.kms.v1.EncryptRequest + (*DecryptRequest)(nil), // 24: google.cloud.kms.v1.DecryptRequest + (*RawEncryptRequest)(nil), // 25: google.cloud.kms.v1.RawEncryptRequest + (*RawDecryptRequest)(nil), // 26: google.cloud.kms.v1.RawDecryptRequest + (*AsymmetricSignRequest)(nil), // 27: google.cloud.kms.v1.AsymmetricSignRequest + (*AsymmetricDecryptRequest)(nil), // 28: google.cloud.kms.v1.AsymmetricDecryptRequest + (*MacSignRequest)(nil), // 29: google.cloud.kms.v1.MacSignRequest + (*MacVerifyRequest)(nil), // 30: google.cloud.kms.v1.MacVerifyRequest + (*GenerateRandomBytesRequest)(nil), // 31: google.cloud.kms.v1.GenerateRandomBytesRequest + (*EncryptResponse)(nil), // 32: google.cloud.kms.v1.EncryptResponse + (*DecryptResponse)(nil), // 33: google.cloud.kms.v1.DecryptResponse + (*RawEncryptResponse)(nil), // 34: google.cloud.kms.v1.RawEncryptResponse + (*RawDecryptResponse)(nil), // 35: google.cloud.kms.v1.RawDecryptResponse + (*AsymmetricSignResponse)(nil), // 36: google.cloud.kms.v1.AsymmetricSignResponse + (*AsymmetricDecryptResponse)(nil), // 37: google.cloud.kms.v1.AsymmetricDecryptResponse + (*MacSignResponse)(nil), // 38: google.cloud.kms.v1.MacSignResponse + (*MacVerifyResponse)(nil), // 39: google.cloud.kms.v1.MacVerifyResponse + 
(*GenerateRandomBytesResponse)(nil), // 40: google.cloud.kms.v1.GenerateRandomBytesResponse + (*Digest)(nil), // 41: google.cloud.kms.v1.Digest + (*LocationMetadata)(nil), // 42: google.cloud.kms.v1.LocationMetadata + (CryptoKeyVersion_CryptoKeyVersionView)(0), // 43: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + (*KeyRing)(nil), // 44: google.cloud.kms.v1.KeyRing + (*CryptoKey)(nil), // 45: google.cloud.kms.v1.CryptoKey + (*CryptoKeyVersion)(nil), // 46: google.cloud.kms.v1.CryptoKeyVersion + (*ImportJob)(nil), // 47: google.cloud.kms.v1.ImportJob + (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 48: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + (*fieldmaskpb.FieldMask)(nil), // 49: google.protobuf.FieldMask + (*wrapperspb.Int64Value)(nil), // 50: google.protobuf.Int64Value + (ProtectionLevel)(0), // 51: google.cloud.kms.v1.ProtectionLevel + (*PublicKey)(nil), // 52: google.cloud.kms.v1.PublicKey +} +var file_google_cloud_kms_v1_service_proto_depIdxs = []int32{ + 43, // 0: google.cloud.kms.v1.ListCryptoKeysRequest.version_view:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + 43, // 1: google.cloud.kms.v1.ListCryptoKeyVersionsRequest.view:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView + 44, // 2: google.cloud.kms.v1.ListKeyRingsResponse.key_rings:type_name -> google.cloud.kms.v1.KeyRing + 45, // 3: google.cloud.kms.v1.ListCryptoKeysResponse.crypto_keys:type_name -> google.cloud.kms.v1.CryptoKey + 46, // 4: google.cloud.kms.v1.ListCryptoKeyVersionsResponse.crypto_key_versions:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 47, // 5: google.cloud.kms.v1.ListImportJobsResponse.import_jobs:type_name -> google.cloud.kms.v1.ImportJob + 44, // 6: google.cloud.kms.v1.CreateKeyRingRequest.key_ring:type_name -> google.cloud.kms.v1.KeyRing + 45, // 7: google.cloud.kms.v1.CreateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey + 46, // 8: google.cloud.kms.v1.CreateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 48, // 9: google.cloud.kms.v1.ImportCryptoKeyVersionRequest.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm + 47, // 10: google.cloud.kms.v1.CreateImportJobRequest.import_job:type_name -> google.cloud.kms.v1.ImportJob + 45, // 11: google.cloud.kms.v1.UpdateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey + 49, // 12: google.cloud.kms.v1.UpdateCryptoKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 13: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion + 49, // 14: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.update_mask:type_name -> google.protobuf.FieldMask + 50, // 15: google.cloud.kms.v1.EncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 16: google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 17: google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 18: google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 19: google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 20: google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 21: 
google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value + 50, // 22: google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 23: google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 24: google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value + 41, // 25: google.cloud.kms.v1.AsymmetricSignRequest.digest:type_name -> google.cloud.kms.v1.Digest + 50, // 26: google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c:type_name -> google.protobuf.Int64Value + 50, // 27: google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 28: google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 29: google.cloud.kms.v1.MacSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 30: google.cloud.kms.v1.MacVerifyRequest.data_crc32c:type_name -> google.protobuf.Int64Value + 50, // 31: google.cloud.kms.v1.MacVerifyRequest.mac_crc32c:type_name -> google.protobuf.Int64Value + 51, // 32: google.cloud.kms.v1.GenerateRandomBytesRequest.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 33: google.cloud.kms.v1.EncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 51, // 34: google.cloud.kms.v1.EncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 35: google.cloud.kms.v1.DecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 51, // 36: google.cloud.kms.v1.DecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 37: google.cloud.kms.v1.RawEncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value + 50, // 38: google.cloud.kms.v1.RawEncryptResponse.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value + 51, // 39: google.cloud.kms.v1.RawEncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 40: google.cloud.kms.v1.RawDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 51, // 41: google.cloud.kms.v1.RawDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 42: google.cloud.kms.v1.AsymmetricSignResponse.signature_crc32c:type_name -> google.protobuf.Int64Value + 51, // 43: google.cloud.kms.v1.AsymmetricSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 44: google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value + 51, // 45: google.cloud.kms.v1.AsymmetricDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 46: google.cloud.kms.v1.MacSignResponse.mac_crc32c:type_name -> google.protobuf.Int64Value + 51, // 47: google.cloud.kms.v1.MacSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 51, // 48: google.cloud.kms.v1.MacVerifyResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel + 50, // 49: google.cloud.kms.v1.GenerateRandomBytesResponse.data_crc32c:type_name -> google.protobuf.Int64Value + 0, // 50: google.cloud.kms.v1.KeyManagementService.ListKeyRings:input_type -> google.cloud.kms.v1.ListKeyRingsRequest + 1, // 51: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:input_type -> google.cloud.kms.v1.ListCryptoKeysRequest + 2, // 52: 
google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:input_type -> google.cloud.kms.v1.ListCryptoKeyVersionsRequest + 3, // 53: google.cloud.kms.v1.KeyManagementService.ListImportJobs:input_type -> google.cloud.kms.v1.ListImportJobsRequest + 8, // 54: google.cloud.kms.v1.KeyManagementService.GetKeyRing:input_type -> google.cloud.kms.v1.GetKeyRingRequest + 9, // 55: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:input_type -> google.cloud.kms.v1.GetCryptoKeyRequest + 10, // 56: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:input_type -> google.cloud.kms.v1.GetCryptoKeyVersionRequest + 11, // 57: google.cloud.kms.v1.KeyManagementService.GetPublicKey:input_type -> google.cloud.kms.v1.GetPublicKeyRequest + 12, // 58: google.cloud.kms.v1.KeyManagementService.GetImportJob:input_type -> google.cloud.kms.v1.GetImportJobRequest + 13, // 59: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:input_type -> google.cloud.kms.v1.CreateKeyRingRequest + 14, // 60: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:input_type -> google.cloud.kms.v1.CreateCryptoKeyRequest + 15, // 61: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:input_type -> google.cloud.kms.v1.CreateCryptoKeyVersionRequest + 16, // 62: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:input_type -> google.cloud.kms.v1.ImportCryptoKeyVersionRequest + 17, // 63: google.cloud.kms.v1.KeyManagementService.CreateImportJob:input_type -> google.cloud.kms.v1.CreateImportJobRequest + 18, // 64: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:input_type -> google.cloud.kms.v1.UpdateCryptoKeyRequest + 19, // 65: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyVersionRequest + 20, // 66: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest + 21, // 67: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:input_type -> google.cloud.kms.v1.DestroyCryptoKeyVersionRequest + 22, // 68: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:input_type -> google.cloud.kms.v1.RestoreCryptoKeyVersionRequest + 23, // 69: google.cloud.kms.v1.KeyManagementService.Encrypt:input_type -> google.cloud.kms.v1.EncryptRequest + 24, // 70: google.cloud.kms.v1.KeyManagementService.Decrypt:input_type -> google.cloud.kms.v1.DecryptRequest + 25, // 71: google.cloud.kms.v1.KeyManagementService.RawEncrypt:input_type -> google.cloud.kms.v1.RawEncryptRequest + 26, // 72: google.cloud.kms.v1.KeyManagementService.RawDecrypt:input_type -> google.cloud.kms.v1.RawDecryptRequest + 27, // 73: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:input_type -> google.cloud.kms.v1.AsymmetricSignRequest + 28, // 74: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:input_type -> google.cloud.kms.v1.AsymmetricDecryptRequest + 29, // 75: google.cloud.kms.v1.KeyManagementService.MacSign:input_type -> google.cloud.kms.v1.MacSignRequest + 30, // 76: google.cloud.kms.v1.KeyManagementService.MacVerify:input_type -> google.cloud.kms.v1.MacVerifyRequest + 31, // 77: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:input_type -> google.cloud.kms.v1.GenerateRandomBytesRequest + 4, // 78: google.cloud.kms.v1.KeyManagementService.ListKeyRings:output_type -> google.cloud.kms.v1.ListKeyRingsResponse + 5, // 79: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:output_type -> google.cloud.kms.v1.ListCryptoKeysResponse + 
6, // 80: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:output_type -> google.cloud.kms.v1.ListCryptoKeyVersionsResponse + 7, // 81: google.cloud.kms.v1.KeyManagementService.ListImportJobs:output_type -> google.cloud.kms.v1.ListImportJobsResponse + 44, // 82: google.cloud.kms.v1.KeyManagementService.GetKeyRing:output_type -> google.cloud.kms.v1.KeyRing + 45, // 83: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 46, // 84: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 52, // 85: google.cloud.kms.v1.KeyManagementService.GetPublicKey:output_type -> google.cloud.kms.v1.PublicKey + 47, // 86: google.cloud.kms.v1.KeyManagementService.GetImportJob:output_type -> google.cloud.kms.v1.ImportJob + 44, // 87: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:output_type -> google.cloud.kms.v1.KeyRing + 45, // 88: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 46, // 89: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 46, // 90: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 47, // 91: google.cloud.kms.v1.KeyManagementService.CreateImportJob:output_type -> google.cloud.kms.v1.ImportJob + 45, // 92: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey + 46, // 93: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 45, // 94: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:output_type -> google.cloud.kms.v1.CryptoKey + 46, // 95: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 46, // 96: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion + 32, // 97: google.cloud.kms.v1.KeyManagementService.Encrypt:output_type -> google.cloud.kms.v1.EncryptResponse + 33, // 98: google.cloud.kms.v1.KeyManagementService.Decrypt:output_type -> google.cloud.kms.v1.DecryptResponse + 34, // 99: google.cloud.kms.v1.KeyManagementService.RawEncrypt:output_type -> google.cloud.kms.v1.RawEncryptResponse + 35, // 100: google.cloud.kms.v1.KeyManagementService.RawDecrypt:output_type -> google.cloud.kms.v1.RawDecryptResponse + 36, // 101: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:output_type -> google.cloud.kms.v1.AsymmetricSignResponse + 37, // 102: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:output_type -> google.cloud.kms.v1.AsymmetricDecryptResponse + 38, // 103: google.cloud.kms.v1.KeyManagementService.MacSign:output_type -> google.cloud.kms.v1.MacSignResponse + 39, // 104: google.cloud.kms.v1.KeyManagementService.MacVerify:output_type -> google.cloud.kms.v1.MacVerifyResponse + 40, // 105: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:output_type -> google.cloud.kms.v1.GenerateRandomBytesResponse + 78, // [78:106] is the sub-list for method output_type + 50, // [50:78] is the sub-list for method input_type + 50, // [50:50] is the sub-list for extension type_name + 50, // [50:50] is the sub-list for extension extendee + 0, // [0:50] is the sub-list for field type_name +} + +func init() { file_google_cloud_kms_v1_service_proto_init() } +func 
file_google_cloud_kms_v1_service_proto_init() { + if File_google_cloud_kms_v1_service_proto != nil { + return + } + file_google_cloud_kms_v1_resources_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyRingsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeyVersionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListImportJobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyRingsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeyVersionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ListImportJobsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*GetKeyRingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*GetCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*GetCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*GetPublicKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*GetImportJobRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*CreateKeyRingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*CreateCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*CreateCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*ImportCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*CreateImportJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*UpdateCryptoKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*UpdateCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*UpdateCryptoKeyPrimaryVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*DestroyCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*RestoreCryptoKeyVersionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*EncryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*DecryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*RawEncryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_google_cloud_kms_v1_service_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*RawDecryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*AsymmetricSignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*AsymmetricDecryptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*MacSignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*MacVerifyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[31].Exporter = func(v any, i int) any { + switch v := v.(*GenerateRandomBytesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[32].Exporter = func(v any, i int) any { + switch v := v.(*EncryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[33].Exporter = func(v any, i int) any { + switch v := v.(*DecryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[34].Exporter = func(v any, i int) any { + switch v := v.(*RawEncryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[35].Exporter = func(v any, i int) any { + switch v := v.(*RawDecryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[36].Exporter = func(v any, i int) any { + switch v := v.(*AsymmetricSignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[37].Exporter = func(v any, i int) any { + switch v := v.(*AsymmetricDecryptResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[38].Exporter = func(v any, i int) any { + switch v := v.(*MacSignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[39].Exporter = func(v any, i int) any { + switch v := v.(*MacVerifyResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[40].Exporter = func(v any, i int) any { + switch v := v.(*GenerateRandomBytesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[41].Exporter = func(v any, i int) any { + switch v := v.(*Digest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[42].Exporter = func(v any, i int) any { + switch v := v.(*LocationMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []any{ + (*ImportCryptoKeyVersionRequest_RsaAesWrappedKey)(nil), + } + file_google_cloud_kms_v1_service_proto_msgTypes[41].OneofWrappers = []any{ + (*Digest_Sha256)(nil), + (*Digest_Sha384)(nil), + (*Digest_Sha512)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_kms_v1_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 43, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_kms_v1_service_proto_goTypes, + DependencyIndexes: file_google_cloud_kms_v1_service_proto_depIdxs, + MessageInfos: file_google_cloud_kms_v1_service_proto_msgTypes, + }.Build() + File_google_cloud_kms_v1_service_proto = out.File + file_google_cloud_kms_v1_service_proto_rawDesc = nil + file_google_cloud_kms_v1_service_proto_goTypes = nil + file_google_cloud_kms_v1_service_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// KeyManagementServiceClient is the client API for KeyManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyManagementServiceClient interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) + // Lists [ImportJobs][google.cloud.kms.v1.ImportJob]. + ListImportJobs(ctx context.Context, in *ListImportJobsRequest, opts ...grpc.CallOption) (*ListImportJobsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. 
+ GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as + // well as its [primary][google.cloud.kms.v1.CryptoKey.primary] + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Returns metadata for a given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Returns the public key for the given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] + // or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. + GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) + // Returns metadata for a given [ImportJob][google.cloud.kms.v1.ImportJob]. + GetImportJob(ctx context.Context, in *GetImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and + // Location. + CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. + CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Import wrapped key material into a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // + // All requests must specify a [CryptoKey][google.cloud.kms.v1.CryptoKey]. If + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is additionally + // specified in the request, key material will be reimported into that + // version. Otherwise, a new version will be created, and will be assigned the + // next sequential id within the [CryptoKey][google.cloud.kms.v1.CryptoKey]. + ImportCryptoKeyVersion(ctx context.Context, in *ImportCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method] is + // required. + CreateImportJob(ctx context.Context, in *CreateImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
+ UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s + // metadata. + // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // using this method. See + // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] + // and + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to move between other states. + UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that + // will be used in + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on a key whose purpose is not + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for + // destruction. + // + // Upon calling this method, + // [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will + // be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be set to the time + // [destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration] + // in the future. At that time, the + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will automatically + // change to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], + // and the key material will be irrevocably destroyed. + // + // Before the + // [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is + // reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // may be called to reverse the process. + DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be cleared. + RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. 
+ Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) + // Decrypts data that was protected by + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) + // Encrypts data using portable cryptographic primitives. Most users should + // choose [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] and + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt] rather than + // their raw counterparts. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [RAW_ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.RAW_ENCRYPT_DECRYPT]. + RawEncrypt(ctx context.Context, in *RawEncryptRequest, opts ...grpc.CallOption) (*RawEncryptResponse, error) + // Decrypts data that was originally encrypted using a raw cryptographic + // mechanism. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // must be + // [RAW_ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.RAW_ENCRYPT_DECRYPT]. + RawDecrypt(ctx context.Context, in *RawDecryptRequest, opts ...grpc.CallOption) (*RawDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] + // corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_DECRYPT. + AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, + // producing a tag that can be verified by another source with the same key. + MacSign(ctx context.Context, in *MacSignRequest, opts ...grpc.CallOption) (*MacSignResponse, error) + // Verifies MAC tag using a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, and returns + // a response that indicates whether or not the verification was successful. + MacVerify(ctx context.Context, in *MacVerifyRequest, opts ...grpc.CallOption) (*MacVerifyResponse, error) + // Generate random bytes using the Cloud KMS randomness source in the provided + // location. 
+ GenerateRandomBytes(ctx context.Context, in *GenerateRandomBytesRequest, opts ...grpc.CallOption) (*GenerateRandomBytesResponse, error) +} + +type keyManagementServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKeyManagementServiceClient(cc grpc.ClientConnInterface) KeyManagementServiceClient { + return &keyManagementServiceClient{cc} +} + +func (c *keyManagementServiceClient) ListKeyRings(ctx context.Context, in *ListKeyRingsRequest, opts ...grpc.CallOption) (*ListKeyRingsResponse, error) { + out := new(ListKeyRingsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListKeyRings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeys(ctx context.Context, in *ListCryptoKeysRequest, opts ...grpc.CallOption) (*ListCryptoKeysResponse, error) { + out := new(ListCryptoKeysResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListCryptoKeyVersions(ctx context.Context, in *ListCryptoKeyVersionsRequest, opts ...grpc.CallOption) (*ListCryptoKeyVersionsResponse, error) { + out := new(ListCryptoKeyVersionsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ListImportJobs(ctx context.Context, in *ListImportJobsRequest, opts ...grpc.CallOption) (*ListImportJobsResponse, error) { + out := new(ListImportJobsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ListImportJobs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetKeyRing(ctx context.Context, in *GetKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKey(ctx context.Context, in *GetCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetCryptoKeyVersion(ctx context.Context, in *GetCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetPublicKey(ctx context.Context, in *GetPublicKeyRequest, opts ...grpc.CallOption) (*PublicKey, error) { + out := new(PublicKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GetImportJob(ctx context.Context, in *GetImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) { + out := new(ImportJob) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GetImportJob", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateKeyRing(ctx context.Context, in *CreateKeyRingRequest, opts ...grpc.CallOption) (*KeyRing, error) { + out := new(KeyRing) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKey(ctx context.Context, in *CreateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateCryptoKeyVersion(ctx context.Context, in *CreateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) ImportCryptoKeyVersion(ctx context.Context, in *ImportCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) CreateImportJob(ctx context.Context, in *CreateImportJobRequest, opts ...grpc.CallOption) (*ImportJob, error) { + out := new(ImportJob) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/CreateImportJob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKey(ctx context.Context, in *UpdateCryptoKeyRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyVersion(ctx context.Context, in *UpdateCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) UpdateCryptoKeyPrimaryVersion(ctx context.Context, in *UpdateCryptoKeyPrimaryVersionRequest, opts ...grpc.CallOption) (*CryptoKey, error) { + out := new(CryptoKey) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) DestroyCryptoKeyVersion(ctx context.Context, in *DestroyCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) RestoreCryptoKeyVersion(ctx context.Context, in *RestoreCryptoKeyVersionRequest, opts ...grpc.CallOption) (*CryptoKeyVersion, error) { + out := new(CryptoKeyVersion) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { + out := new(EncryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Encrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { + out := new(DecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/Decrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) RawEncrypt(ctx context.Context, in *RawEncryptRequest, opts ...grpc.CallOption) (*RawEncryptResponse, error) { + out := new(RawEncryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/RawEncrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) RawDecrypt(ctx context.Context, in *RawDecryptRequest, opts ...grpc.CallOption) (*RawDecryptResponse, error) { + out := new(RawDecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/RawDecrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricSign(ctx context.Context, in *AsymmetricSignRequest, opts ...grpc.CallOption) (*AsymmetricSignResponse, error) { + out := new(AsymmetricSignResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) AsymmetricDecrypt(ctx context.Context, in *AsymmetricDecryptRequest, opts ...grpc.CallOption) (*AsymmetricDecryptResponse, error) { + out := new(AsymmetricDecryptResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) MacSign(ctx context.Context, in *MacSignRequest, opts ...grpc.CallOption) (*MacSignResponse, error) { + out := new(MacSignResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/MacSign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) MacVerify(ctx context.Context, in *MacVerifyRequest, opts ...grpc.CallOption) (*MacVerifyResponse, error) { + out := new(MacVerifyResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/MacVerify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *keyManagementServiceClient) GenerateRandomBytes(ctx context.Context, in *GenerateRandomBytesRequest, opts ...grpc.CallOption) (*GenerateRandomBytesResponse, error) { + out := new(GenerateRandomBytesResponse) + err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// KeyManagementServiceServer is the server API for KeyManagementService service. +type KeyManagementServiceServer interface { + // Lists [KeyRings][google.cloud.kms.v1.KeyRing]. + ListKeyRings(context.Context, *ListKeyRingsRequest) (*ListKeyRingsResponse, error) + // Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey]. + ListCryptoKeys(context.Context, *ListCryptoKeysRequest) (*ListCryptoKeysResponse, error) + // Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]. + ListCryptoKeyVersions(context.Context, *ListCryptoKeyVersionsRequest) (*ListCryptoKeyVersionsResponse, error) + // Lists [ImportJobs][google.cloud.kms.v1.ImportJob]. + ListImportJobs(context.Context, *ListImportJobsRequest) (*ListImportJobsResponse, error) + // Returns metadata for a given [KeyRing][google.cloud.kms.v1.KeyRing]. + GetKeyRing(context.Context, *GetKeyRingRequest) (*KeyRing, error) + // Returns metadata for a given [CryptoKey][google.cloud.kms.v1.CryptoKey], as + // well as its [primary][google.cloud.kms.v1.CryptoKey.primary] + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKey(context.Context, *GetCryptoKeyRequest) (*CryptoKey, error) + // Returns metadata for a given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + GetCryptoKeyVersion(context.Context, *GetCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Returns the public key for the given + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN] + // or + // [ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT]. + GetPublicKey(context.Context, *GetPublicKeyRequest) (*PublicKey, error) + // Returns metadata for a given [ImportJob][google.cloud.kms.v1.ImportJob]. + GetImportJob(context.Context, *GetImportJobRequest) (*ImportJob, error) + // Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given Project and + // Location. + CreateKeyRing(context.Context, *CreateKeyRingRequest) (*KeyRing, error) + // Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and + // [CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm] + // are required. + CreateCryptoKey(context.Context, *CreateCryptoKeyRequest) (*CryptoKey, error) + // Create a new [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a + // [CryptoKey][google.cloud.kms.v1.CryptoKey]. + // + // The server will assign the next sequential id. If unset, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]. + CreateCryptoKeyVersion(context.Context, *CreateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Import wrapped key material into a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. + // + // All requests must specify a [CryptoKey][google.cloud.kms.v1.CryptoKey]. If + // a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is additionally + // specified in the request, key material will be reimported into that + // version. Otherwise, a new version will be created, and will be assigned the + // next sequential id within the [CryptoKey][google.cloud.kms.v1.CryptoKey]. 
+ ImportCryptoKeyVersion(context.Context, *ImportCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a + // [KeyRing][google.cloud.kms.v1.KeyRing]. + // + // [ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method] is + // required. + CreateImportJob(context.Context, *CreateImportJobRequest) (*ImportJob, error) + // Update a [CryptoKey][google.cloud.kms.v1.CryptoKey]. + UpdateCryptoKey(context.Context, *UpdateCryptoKeyRequest) (*CryptoKey, error) + // Update a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s + // metadata. + // + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] may be changed between + // [ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED] + // and + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED] + // using this method. See + // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion] + // and + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // to move between other states. + UpdateCryptoKeyVersion(context.Context, *UpdateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Update the version of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that + // will be used in + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. + // + // Returns an error if called on a key whose purpose is not + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + UpdateCryptoKeyPrimaryVersion(context.Context, *UpdateCryptoKeyPrimaryVersionRequest) (*CryptoKey, error) + // Schedule a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for + // destruction. + // + // Upon calling this method, + // [CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state] will + // be set to + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be set to the time + // [destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration] + // in the future. At that time, the + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will automatically + // change to + // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED], + // and the key material will be irrevocably destroyed. + // + // Before the + // [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] is + // reached, + // [RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion] + // may be called to reverse the process. + DestroyCryptoKeyVersion(context.Context, *DestroyCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Restore a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the + // [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED] + // state. + // + // Upon restoration of the CryptoKeyVersion, + // [state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set to + // [DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED], + // and [destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time] will + // be cleared. 
+ RestoreCryptoKeyVersion(context.Context, *RestoreCryptoKeyVersionRequest) (*CryptoKeyVersion, error) + // Encrypts data, so that it can only be recovered by a call to + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) + // Decrypts data that was protected by + // [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT]. + Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) + // Encrypts data using portable cryptographic primitives. Most users should + // choose [Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt] and + // [Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt] rather than + // their raw counterparts. The + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must be + // [RAW_ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.RAW_ENCRYPT_DECRYPT]. + RawEncrypt(context.Context, *RawEncryptRequest) (*RawEncryptResponse, error) + // Decrypts data that was originally encrypted using a raw cryptographic + // mechanism. The [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // must be + // [RAW_ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.RAW_ENCRYPT_DECRYPT]. + RawDecrypt(context.Context, *RawDecryptRequest) (*RawDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_SIGN, producing a signature that can be verified with the public + // key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]. + AsymmetricSign(context.Context, *AsymmetricSignRequest) (*AsymmetricSignResponse, error) + // Decrypts data that was encrypted with a public key retrieved from + // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey] + // corresponding to a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] + // ASYMMETRIC_DECRYPT. + AsymmetricDecrypt(context.Context, *AsymmetricDecryptRequest) (*AsymmetricDecryptResponse, error) + // Signs data using a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] + // with [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, + // producing a tag that can be verified by another source with the same key. + MacSign(context.Context, *MacSignRequest) (*MacSignResponse, error) + // Verifies MAC tag using a + // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with + // [CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC, and returns + // a response that indicates whether or not the verification was successful. + MacVerify(context.Context, *MacVerifyRequest) (*MacVerifyResponse, error) + // Generate random bytes using the Cloud KMS randomness source in the provided + // location. + GenerateRandomBytes(context.Context, *GenerateRandomBytesRequest) (*GenerateRandomBytesResponse, error) +} + +// UnimplementedKeyManagementServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedKeyManagementServiceServer struct { +} + +func (*UnimplementedKeyManagementServiceServer) ListKeyRings(context.Context, *ListKeyRingsRequest) (*ListKeyRingsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListKeyRings not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListCryptoKeys(context.Context, *ListCryptoKeysRequest) (*ListCryptoKeysResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListCryptoKeys not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListCryptoKeyVersions(context.Context, *ListCryptoKeyVersionsRequest) (*ListCryptoKeyVersionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListCryptoKeyVersions not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ListImportJobs(context.Context, *ListImportJobsRequest) (*ListImportJobsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListImportJobs not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetKeyRing(context.Context, *GetKeyRingRequest) (*KeyRing, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKeyRing not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetCryptoKey(context.Context, *GetCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetCryptoKeyVersion(context.Context, *GetCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetPublicKey(context.Context, *GetPublicKeyRequest) (*PublicKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPublicKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GetImportJob(context.Context, *GetImportJobRequest) (*ImportJob, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetImportJob not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateKeyRing(context.Context, *CreateKeyRingRequest) (*KeyRing, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateKeyRing not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateCryptoKey(context.Context, *CreateCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateCryptoKeyVersion(context.Context, *CreateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) ImportCryptoKeyVersion(context.Context, *ImportCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) CreateImportJob(context.Context, *CreateImportJobRequest) (*ImportJob, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateImportJob not implemented") +} +func (*UnimplementedKeyManagementServiceServer) UpdateCryptoKey(context.Context, *UpdateCryptoKeyRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKey not implemented") +} +func (*UnimplementedKeyManagementServiceServer) 
UpdateCryptoKeyVersion(context.Context, *UpdateCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) UpdateCryptoKeyPrimaryVersion(context.Context, *UpdateCryptoKeyPrimaryVersionRequest) (*CryptoKey, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCryptoKeyPrimaryVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) DestroyCryptoKeyVersion(context.Context, *DestroyCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method DestroyCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) RestoreCryptoKeyVersion(context.Context, *RestoreCryptoKeyVersionRequest) (*CryptoKeyVersion, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreCryptoKeyVersion not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Decrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) RawEncrypt(context.Context, *RawEncryptRequest) (*RawEncryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RawEncrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) RawDecrypt(context.Context, *RawDecryptRequest) (*RawDecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RawDecrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) AsymmetricSign(context.Context, *AsymmetricSignRequest) (*AsymmetricSignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AsymmetricSign not implemented") +} +func (*UnimplementedKeyManagementServiceServer) AsymmetricDecrypt(context.Context, *AsymmetricDecryptRequest) (*AsymmetricDecryptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AsymmetricDecrypt not implemented") +} +func (*UnimplementedKeyManagementServiceServer) MacSign(context.Context, *MacSignRequest) (*MacSignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MacSign not implemented") +} +func (*UnimplementedKeyManagementServiceServer) MacVerify(context.Context, *MacVerifyRequest) (*MacVerifyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MacVerify not implemented") +} +func (*UnimplementedKeyManagementServiceServer) GenerateRandomBytes(context.Context, *GenerateRandomBytesRequest) (*GenerateRandomBytesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateRandomBytes not implemented") +} + +func RegisterKeyManagementServiceServer(s *grpc.Server, srv KeyManagementServiceServer) { + s.RegisterService(&_KeyManagementService_serviceDesc, srv) +} + +func _KeyManagementService_ListKeyRings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListKeyRingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.kms.v1.KeyManagementService/ListKeyRings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListKeyRings(ctx, req.(*ListKeyRingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeys(ctx, req.(*ListCryptoKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListCryptoKeyVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCryptoKeyVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListCryptoKeyVersions(ctx, req.(*ListCryptoKeyVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ListImportJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImportJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ListImportJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ListImportJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ListImportJobs(ctx, req.(*ListImportJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetKeyRing(ctx, req.(*GetKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKey(ctx, req.(*GetCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetCryptoKeyVersion(ctx, req.(*GetCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPublicKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetPublicKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetPublicKey(ctx, req.(*GetPublicKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GetImportJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImportJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GetImportJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GetImportJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GetImportJob(ctx, req.(*GetImportJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateKeyRing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateKeyRingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateKeyRing", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateKeyRing(ctx, req.(*CreateKeyRingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKey(ctx, req.(*CreateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateCryptoKeyVersion(ctx, req.(*CreateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_ImportCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).ImportCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).ImportCryptoKeyVersion(ctx, req.(*ImportCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_CreateImportJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateImportJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).CreateImportJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/CreateImportJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).CreateImportJob(ctx, req.(*CreateImportJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKey(ctx, req.(*UpdateCryptoKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyVersion(ctx, req.(*UpdateCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCryptoKeyPrimaryVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).UpdateCryptoKeyPrimaryVersion(ctx, req.(*UpdateCryptoKeyPrimaryVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_DestroyCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).DestroyCryptoKeyVersion(ctx, req.(*DestroyCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_RestoreCryptoKeyVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreCryptoKeyVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).RestoreCryptoKeyVersion(ctx, req.(*RestoreCryptoKeyVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Encrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Encrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(KeyManagementServiceServer).Encrypt(ctx, req.(*EncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).Decrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/Decrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).Decrypt(ctx, req.(*DecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_RawEncrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawEncryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).RawEncrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/RawEncrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).RawEncrypt(ctx, req.(*RawEncryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_RawDecrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawDecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).RawDecrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/RawDecrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).RawDecrypt(ctx, req.(*RawDecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricSignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricSign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).AsymmetricSign(ctx, req.(*AsymmetricSignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_AsymmetricDecrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AsymmetricDecryptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(KeyManagementServiceServer).AsymmetricDecrypt(ctx, req.(*AsymmetricDecryptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_MacSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MacSignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).MacSign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/MacSign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).MacSign(ctx, req.(*MacSignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_MacVerify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MacVerifyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).MacVerify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/MacVerify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).MacVerify(ctx, req.(*MacVerifyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KeyManagementService_GenerateRandomBytes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateRandomBytesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyManagementServiceServer).GenerateRandomBytes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyManagementServiceServer).GenerateRandomBytes(ctx, req.(*GenerateRandomBytesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyManagementService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.kms.v1.KeyManagementService", + HandlerType: (*KeyManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListKeyRings", + Handler: _KeyManagementService_ListKeyRings_Handler, + }, + { + MethodName: "ListCryptoKeys", + Handler: _KeyManagementService_ListCryptoKeys_Handler, + }, + { + MethodName: "ListCryptoKeyVersions", + Handler: _KeyManagementService_ListCryptoKeyVersions_Handler, + }, + { + MethodName: "ListImportJobs", + Handler: _KeyManagementService_ListImportJobs_Handler, + }, + { + MethodName: "GetKeyRing", + Handler: _KeyManagementService_GetKeyRing_Handler, + }, + { + MethodName: "GetCryptoKey", + Handler: _KeyManagementService_GetCryptoKey_Handler, + }, + { + MethodName: "GetCryptoKeyVersion", + Handler: _KeyManagementService_GetCryptoKeyVersion_Handler, + }, + { + MethodName: "GetPublicKey", + Handler: _KeyManagementService_GetPublicKey_Handler, + }, + { + MethodName: "GetImportJob", + Handler: _KeyManagementService_GetImportJob_Handler, + }, + { + MethodName: "CreateKeyRing", + Handler: _KeyManagementService_CreateKeyRing_Handler, + }, + { + MethodName: "CreateCryptoKey", + Handler: _KeyManagementService_CreateCryptoKey_Handler, + }, + { + 
MethodName: "CreateCryptoKeyVersion", + Handler: _KeyManagementService_CreateCryptoKeyVersion_Handler, + }, + { + MethodName: "ImportCryptoKeyVersion", + Handler: _KeyManagementService_ImportCryptoKeyVersion_Handler, + }, + { + MethodName: "CreateImportJob", + Handler: _KeyManagementService_CreateImportJob_Handler, + }, + { + MethodName: "UpdateCryptoKey", + Handler: _KeyManagementService_UpdateCryptoKey_Handler, + }, + { + MethodName: "UpdateCryptoKeyVersion", + Handler: _KeyManagementService_UpdateCryptoKeyVersion_Handler, + }, + { + MethodName: "UpdateCryptoKeyPrimaryVersion", + Handler: _KeyManagementService_UpdateCryptoKeyPrimaryVersion_Handler, + }, + { + MethodName: "DestroyCryptoKeyVersion", + Handler: _KeyManagementService_DestroyCryptoKeyVersion_Handler, + }, + { + MethodName: "RestoreCryptoKeyVersion", + Handler: _KeyManagementService_RestoreCryptoKeyVersion_Handler, + }, + { + MethodName: "Encrypt", + Handler: _KeyManagementService_Encrypt_Handler, + }, + { + MethodName: "Decrypt", + Handler: _KeyManagementService_Decrypt_Handler, + }, + { + MethodName: "RawEncrypt", + Handler: _KeyManagementService_RawEncrypt_Handler, + }, + { + MethodName: "RawDecrypt", + Handler: _KeyManagementService_RawDecrypt_Handler, + }, + { + MethodName: "AsymmetricSign", + Handler: _KeyManagementService_AsymmetricSign_Handler, + }, + { + MethodName: "AsymmetricDecrypt", + Handler: _KeyManagementService_AsymmetricDecrypt_Handler, + }, + { + MethodName: "MacSign", + Handler: _KeyManagementService_MacSign_Handler, + }, + { + MethodName: "MacVerify", + Handler: _KeyManagementService_MacVerify_Handler, + }, + { + MethodName: "GenerateRandomBytes", + Handler: _KeyManagementService_GenerateRandomBytes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/kms/v1/service.proto", +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/cloud.google.com/go/kms/apiv1/version.go similarity index 69% rename from vendor/google.golang.org/api/transport/http/dial_appengine.go rename to vendor/cloud.google.com/go/kms/apiv1/version.go index 04c81413c..f84de4ca5 100644 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ b/vendor/cloud.google.com/go/kms/apiv1/version.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,19 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build appengine +// Code generated by gapicgen. DO NOT EDIT. -package http +package kms -import ( - "context" - "net/http" - - "google.golang.org/appengine/urlfetch" -) +import "cloud.google.com/go/kms/internal" func init() { - appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { - return &urlfetch.Transport{Context: ctx} - } + versionClient = internal.Version } diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go new file mode 100644 index 000000000..0b0f0e914 --- /dev/null +++ b/vendor/cloud.google.com/go/kms/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.20.0" diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md new file mode 100644 index 000000000..d120456cd --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -0,0 +1,138 @@ +# Changes + +## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.0...longrunning/v0.6.1) (2024-09-12) + + +### Bug Fixes + +* **longrunning:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.12...longrunning/v0.6.0) (2024-08-20) + + +### Features + +* **longrunning:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + +## [0.5.12](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.11...longrunning/v0.5.12) (2024-08-08) + + +### Bug Fixes + +* **longrunning:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73)) + +## [0.5.11](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.10...longrunning/v0.5.11) (2024-07-24) + + +### Bug Fixes + +* **longrunning:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.5.10](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.9...longrunning/v0.5.10) (2024-07-10) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + +## [0.5.9](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.8...longrunning/v0.5.9) (2024-07-01) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + +## [0.5.8](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.7...longrunning/v0.5.8) (2024-06-26) + + +### Bug Fixes + +* **longrunning:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568)) + +## [0.5.7](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.6...longrunning/v0.5.7) (2024-05-01) + + +### Bug Fixes + +* **longrunning:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) + +## [0.5.6](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.5...longrunning/v0.5.6) (2024-03-14) + + +### Bug Fixes + +* **longrunning:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + +## 
[0.5.5](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.4...longrunning/v0.5.5) (2024-01-30) + + +### Bug Fixes + +* **longrunning:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698)) + +## [0.5.4](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.3...longrunning/v0.5.4) (2023-11-01) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [0.5.3](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.2...longrunning/v0.5.3) (2023-10-26) + + +### Bug Fixes + +* **longrunning:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.1...longrunning/v0.5.2) (2023-10-12) + + +### Bug Fixes + +* **longrunning:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.0...longrunning/v0.5.1) (2023-06-20) + + +### Bug Fixes + +* **longrunning:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) + +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.2...longrunning/v0.5.0) (2023-05-30) + + +### Features + +* **longrunning:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.1...longrunning/v0.4.2) (2023-05-08) + + +### Bug Fixes + +* **longrunning:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.0...longrunning/v0.4.1) (2023-02-14) + + +### Bug Fixes + +* **longrunning:** Properly parse errors with apierror ([#7392](https://github.com/googleapis/google-cloud-go/issues/7392)) ([e768e48](https://github.com/googleapis/google-cloud-go/commit/e768e487e10b197ba42a2339014136d066190610)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.3.0...longrunning/v0.4.0) (2023-01-04) + + +### Features + +* **longrunning:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03) + + +### Features + +* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## v0.1.0 + +Initial release. diff --git a/vendor/cloud.google.com/go/longrunning/LICENSE b/vendor/cloud.google.com/go/longrunning/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/longrunning/README.md b/vendor/cloud.google.com/go/longrunning/README.md new file mode 100644 index 000000000..a07f3093f --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/README.md @@ -0,0 +1,26 @@ +# longrunning + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/longrunning.svg)](https://pkg.go.dev/cloud.google.com/go/longrunning) + +A helper library for working with long running operations. + +## Install + +```bash +go get cloud.google.com/go/longrunning +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go new file mode 100644 index 000000000..a42e61e99 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go @@ -0,0 +1,69 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package longrunning + +import ( + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "google.golang.org/api/iterator" +) + +// OperationIterator manages a stream of *longrunningpb.Operation. +type OperationIterator struct { + items []*longrunningpb.Operation + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *OperationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) { + var item *longrunningpb.Operation + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *OperationIterator) bufLen() int { + return len(it.items) +} + +func (it *OperationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go new file mode 100644 index 000000000..eca6d4def --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go @@ -0,0 +1,32 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package longrunning + +import ( + "iter" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationIterator) All() iter.Seq2[*longrunningpb.Operation, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go new file mode 100644 index 000000000..7976ed734 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -0,0 +1,117 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package longrunning is an auto-generated package for the +// Long Running Operations API. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. 
+// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &longrunningpb.CancelOperationRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#CancelOperationRequest. +// } +// err = c.CancelOperation(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// # Use of Context +// +// The ctx passed to NewOperationsClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package longrunning // import "cloud.google.com/go/longrunning/autogen" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "", + } +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go new file mode 100644 index 000000000..f09714b9b --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go @@ -0,0 +1,30 @@ +// Copyright 2020, Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +import ( + "context" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// InternalFromConn is for use by the Google Cloud Libraries only. +// +// Deprecated. Use `NewOperationsClient(ctx, option.WithGRPCConn(conn))` instead. +func InternalFromConn(conn *grpc.ClientConn) *OperationsClient { + c, _ := NewOperationsClient(context.Background(), option.WithGRPCConn(conn)) + return c +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json new file mode 100644 index 000000000..527142821 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json @@ -0,0 +1,73 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.longrunning", + "libraryPackage": "cloud.google.com/go/longrunning/autogen", + "services": { + "Operations": { + "clients": { + "grpc": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } + }, + "rest": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/info.go b/vendor/cloud.google.com/go/longrunning/autogen/info.go new file mode 100644 index 000000000..b006c4d01 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/info.go @@ -0,0 +1,24 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) { + c.setGoogleClientInfo(keyval...) 
+} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go new file mode 100644 index 000000000..0a4d66c63 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -0,0 +1,1230 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/longrunning/operations.proto + +package longrunningpb + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This resource represents a long-running operation that is the result of a +// network API call. +type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The server-assigned name, which is only unique within the same service that + // originally returns it. If you use the default HTTP mapping, the + // `name` should be a resource name ending with `operations/{unique_id}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Service-specific metadata associated with the operation. It typically + // contains progress information and common metadata such as create time. + // Some services might not provide such metadata. Any method that returns a + // long-running operation should document the metadata type, if any. + Metadata *anypb.Any `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // If the value is `false`, it means the operation is still in progress. + // If `true`, the operation is completed, and either `error` or `response` is + // available. + Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` + // The operation result, which can be either an `error` or a valid `response`. + // If `done` == `false`, neither `error` nor `response` is set. + // If `done` == `true`, exactly one of `error` or `response` is set. 
+ // + // Types that are assignable to Result: + // + // *Operation_Error + // *Operation_Response + Result isOperation_Result `protobuf_oneof:"result"` +} + +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. +func (*Operation) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{0} +} + +func (x *Operation) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Operation) GetMetadata() *anypb.Any { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Operation) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +func (m *Operation) GetResult() isOperation_Result { + if m != nil { + return m.Result + } + return nil +} + +func (x *Operation) GetError() *status.Status { + if x, ok := x.GetResult().(*Operation_Error); ok { + return x.Error + } + return nil +} + +func (x *Operation) GetResponse() *anypb.Any { + if x, ok := x.GetResult().(*Operation_Response); ok { + return x.Response + } + return nil +} + +type isOperation_Result interface { + isOperation_Result() +} + +type Operation_Error struct { + // The error result of the operation in case of failure or cancellation. + Error *status.Status `protobuf:"bytes,4,opt,name=error,proto3,oneof"` +} + +type Operation_Response struct { + // The normal response of the operation in case of success. If the original + // method returns no data on success, such as `Delete`, the response is + // `google.protobuf.Empty`. If the original method is standard + // `Get`/`Create`/`Update`, the response should be the resource. For other + // methods, the response should have the type `XxxResponse`, where `Xxx` + // is the original method name. For example, if the original method name + // is `TakeSnapshot()`, the inferred response type is + // `TakeSnapshotResponse`. + Response *anypb.Any `protobuf:"bytes,5,opt,name=response,proto3,oneof"` +} + +func (*Operation_Error) isOperation_Result() {} + +func (*Operation_Response) isOperation_Result() {} + +// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +type GetOperationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the operation resource. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetOperationRequest) Reset() { + *x = GetOperationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationRequest) ProtoMessage() {} + +func (x *GetOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationRequest.ProtoReflect.Descriptor instead. +func (*GetOperationRequest) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{1} +} + +func (x *GetOperationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +type ListOperationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the operation's parent resource. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The standard list filter. + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The standard list page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListOperationsRequest) Reset() { + *x = ListOperationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListOperationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListOperationsRequest) ProtoMessage() {} + +func (x *ListOperationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListOperationsRequest.ProtoReflect.Descriptor instead. +func (*ListOperationsRequest) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{2} +} + +func (x *ListOperationsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListOperationsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListOperationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListOperationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. 
+type ListOperationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of operations that matches the specified filter in the request. + Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // The standard List next-page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListOperationsResponse) Reset() { + *x = ListOperationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListOperationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListOperationsResponse) ProtoMessage() {} + +func (x *ListOperationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListOperationsResponse.ProtoReflect.Descriptor instead. +func (*ListOperationsResponse) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{3} +} + +func (x *ListOperationsResponse) GetOperations() []*Operation { + if x != nil { + return x.Operations + } + return nil +} + +func (x *ListOperationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +type CancelOperationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the operation resource to be cancelled. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *CancelOperationRequest) Reset() { + *x = CancelOperationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelOperationRequest) ProtoMessage() {} + +func (x *CancelOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelOperationRequest.ProtoReflect.Descriptor instead. +func (*CancelOperationRequest) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{4} +} + +func (x *CancelOperationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +type DeleteOperationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the operation resource to be deleted. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteOperationRequest) Reset() { + *x = DeleteOperationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteOperationRequest) ProtoMessage() {} + +func (x *DeleteOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteOperationRequest.ProtoReflect.Descriptor instead. +func (*DeleteOperationRequest) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteOperationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The request message for [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. +type WaitOperationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the operation resource to wait on. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The maximum duration to wait before timing out. If left blank, the wait + // will be at most the time permitted by the underlying HTTP/RPC protocol. + // If RPC context deadline is also specified, the shorter one will be used. + Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *WaitOperationRequest) Reset() { + *x = WaitOperationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitOperationRequest) ProtoMessage() {} + +func (x *WaitOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitOperationRequest.ProtoReflect.Descriptor instead. +func (*WaitOperationRequest) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{6} +} + +func (x *WaitOperationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +// A message representing the message types used by a long-running operation. 
+// +// Example: +// +// rpc LongRunningRecognize(LongRunningRecognizeRequest) +// returns (google.longrunning.Operation) { +// option (google.longrunning.operation_info) = { +// response_type: "LongRunningRecognizeResponse" +// metadata_type: "LongRunningRecognizeMetadata" +// }; +// } +type OperationInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The message name of the primary return type for this + // long-running operation. + // This type will be used to deserialize the LRO's response. + // + // If the response is in a different package from the rpc, a fully-qualified + // message name must be used (e.g. `google.protobuf.Struct`). + // + // Note: Altering this value constitutes a breaking change. + ResponseType string `protobuf:"bytes,1,opt,name=response_type,json=responseType,proto3" json:"response_type,omitempty"` + // Required. The message name of the metadata type for this long-running + // operation. + // + // If the response is in a different package from the rpc, a fully-qualified + // message name must be used (e.g. `google.protobuf.Struct`). + // + // Note: Altering this value constitutes a breaking change. + MetadataType string `protobuf:"bytes,2,opt,name=metadata_type,json=metadataType,proto3" json:"metadata_type,omitempty"` +} + +func (x *OperationInfo) Reset() { + *x = OperationInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_longrunning_operations_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OperationInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationInfo) ProtoMessage() {} + +func (x *OperationInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_longrunning_operations_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationInfo.ProtoReflect.Descriptor instead. +func (*OperationInfo) Descriptor() ([]byte, []int) { + return file_google_longrunning_operations_proto_rawDescGZIP(), []int{7} +} + +func (x *OperationInfo) GetResponseType() string { + if x != nil { + return x.ResponseType + } + return "" +} + +func (x *OperationInfo) GetMetadataType() string { + if x != nil { + return x.MetadataType + } + return "" +} + +var file_google_longrunning_operations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*OperationInfo)(nil), + Field: 1049, + Name: "google.longrunning.operation_info", + Tag: "bytes,1049,opt,name=operation_info", + Filename: "google/longrunning/operations.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // Additional information regarding long-running operations. + // In particular, this specifies the types that are returned from + // long-running operations. + // + // Required for methods that return `google.longrunning.Operation`; invalid + // otherwise. 
+ // + // optional google.longrunning.OperationInfo operation_info = 1049; + E_OperationInfo = &file_google_longrunning_operations_proto_extTypes[0] +) + +var File_google_longrunning_operations_proto protoreflect.FileDescriptor + +var file_google_longrunning_operations_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x7f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x7f, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x2c, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x5f, + 0x0a, 0x14, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, + 0x59, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x32, 0xaa, 0x05, 0x0a, 0x0a, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d, + 0x12, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, + 0x7d, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a, + 0x7d, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x5a, 0x0a, 0x0d, + 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 
0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x1a, 0x1d, 0xca, 0x41, 0x1a, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x3a, 0x69, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x42, 0x9d, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x0f, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x43, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x67, 0x65, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, + 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x12, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_longrunning_operations_proto_rawDescOnce sync.Once + file_google_longrunning_operations_proto_rawDescData = file_google_longrunning_operations_proto_rawDesc +) + +func file_google_longrunning_operations_proto_rawDescGZIP() []byte { + file_google_longrunning_operations_proto_rawDescOnce.Do(func() { + file_google_longrunning_operations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_longrunning_operations_proto_rawDescData) + }) + return file_google_longrunning_operations_proto_rawDescData +} + +var file_google_longrunning_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_google_longrunning_operations_proto_goTypes = []any{ + (*Operation)(nil), // 0: google.longrunning.Operation + (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest + (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest + (*ListOperationsResponse)(nil), // 3: google.longrunning.ListOperationsResponse + (*CancelOperationRequest)(nil), // 4: google.longrunning.CancelOperationRequest + (*DeleteOperationRequest)(nil), // 5: google.longrunning.DeleteOperationRequest + (*WaitOperationRequest)(nil), // 6: google.longrunning.WaitOperationRequest + (*OperationInfo)(nil), // 7: google.longrunning.OperationInfo + (*anypb.Any)(nil), // 8: google.protobuf.Any + (*status.Status)(nil), // 9: google.rpc.Status + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 11: google.protobuf.MethodOptions + (*emptypb.Empty)(nil), // 12: google.protobuf.Empty +} +var 
file_google_longrunning_operations_proto_depIdxs = []int32{ + 8, // 0: google.longrunning.Operation.metadata:type_name -> google.protobuf.Any + 9, // 1: google.longrunning.Operation.error:type_name -> google.rpc.Status + 8, // 2: google.longrunning.Operation.response:type_name -> google.protobuf.Any + 0, // 3: google.longrunning.ListOperationsResponse.operations:type_name -> google.longrunning.Operation + 10, // 4: google.longrunning.WaitOperationRequest.timeout:type_name -> google.protobuf.Duration + 11, // 5: google.longrunning.operation_info:extendee -> google.protobuf.MethodOptions + 7, // 6: google.longrunning.operation_info:type_name -> google.longrunning.OperationInfo + 2, // 7: google.longrunning.Operations.ListOperations:input_type -> google.longrunning.ListOperationsRequest + 1, // 8: google.longrunning.Operations.GetOperation:input_type -> google.longrunning.GetOperationRequest + 5, // 9: google.longrunning.Operations.DeleteOperation:input_type -> google.longrunning.DeleteOperationRequest + 4, // 10: google.longrunning.Operations.CancelOperation:input_type -> google.longrunning.CancelOperationRequest + 6, // 11: google.longrunning.Operations.WaitOperation:input_type -> google.longrunning.WaitOperationRequest + 3, // 12: google.longrunning.Operations.ListOperations:output_type -> google.longrunning.ListOperationsResponse + 0, // 13: google.longrunning.Operations.GetOperation:output_type -> google.longrunning.Operation + 12, // 14: google.longrunning.Operations.DeleteOperation:output_type -> google.protobuf.Empty + 12, // 15: google.longrunning.Operations.CancelOperation:output_type -> google.protobuf.Empty + 0, // 16: google.longrunning.Operations.WaitOperation:output_type -> google.longrunning.Operation + 12, // [12:17] is the sub-list for method output_type + 7, // [7:12] is the sub-list for method input_type + 6, // [6:7] is the sub-list for extension type_name + 5, // [5:6] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_google_longrunning_operations_proto_init() } +func file_google_longrunning_operations_proto_init() { + if File_google_longrunning_operations_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_longrunning_operations_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetOperationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListOperationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListOperationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*CancelOperationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_longrunning_operations_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DeleteOperationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*WaitOperationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_longrunning_operations_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*OperationInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_longrunning_operations_proto_msgTypes[0].OneofWrappers = []any{ + (*Operation_Error)(nil), + (*Operation_Response)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_longrunning_operations_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 1, + NumServices: 1, + }, + GoTypes: file_google_longrunning_operations_proto_goTypes, + DependencyIndexes: file_google_longrunning_operations_proto_depIdxs, + MessageInfos: file_google_longrunning_operations_proto_msgTypes, + ExtensionInfos: file_google_longrunning_operations_proto_extTypes, + }.Build() + File_google_longrunning_operations_proto = out.File + file_google_longrunning_operations_proto_rawDesc = nil + file_google_longrunning_operations_proto_goTypes = nil + file_google_longrunning_operations_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// OperationsClient is the client API for Operations service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type OperationsClient interface { + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns `UNIMPLEMENTED`. + // + // NOTE: the `name` binding allows API services to override the binding + // to use different resource name schemes, such as `users/*/operations`. To + // override the binding, API services can add a binding such as + // `"/v1/{name=users/*}/operations"` to their service configuration. + // For backwards compatibility, the default name includes the operations + // collection id, however overriding users must ensure the name binding + // is the parent resource, without the operations collection id. + ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) + // Gets the latest state of a long-running operation. Clients can use this + // method to poll the operation result at intervals as recommended by the API + // service. + GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. 
If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Waits until the specified long-running operation is done or reaches at most + // a specified timeout, returning the latest state. If the operation is + // already done, the latest state is immediately returned. If the timeout + // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + // timeout is used. If the server does not support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + // Note that this method is on a best-effort basis. It may return the latest + // state before the specified timeout (including immediately), meaning even an + // immediate response is no guarantee that the operation is done. + WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error) +} + +type operationsClient struct { + cc grpc.ClientConnInterface +} + +func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient { + return &operationsClient{cc} +} + +func (c *operationsClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { + out := new(ListOperationsResponse) + err := c.cc.Invoke(ctx, "/google.longrunning.Operations/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.longrunning.Operations/GetOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *operationsClient) WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.longrunning.Operations/WaitOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OperationsServer is the server API for Operations service. +type OperationsServer interface { + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns `UNIMPLEMENTED`. + // + // NOTE: the `name` binding allows API services to override the binding + // to use different resource name schemes, such as `users/*/operations`. To + // override the binding, API services can add a binding such as + // `"/v1/{name=users/*}/operations"` to their service configuration. + // For backwards compatibility, the default name includes the operations + // collection id, however overriding users must ensure the name binding + // is the parent resource, without the operations collection id. + ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) + // Gets the latest state of a long-running operation. Clients can use this + // method to poll the operation result at intervals as recommended by the API + // service. + GetOperation(context.Context, *GetOperationRequest) (*Operation, error) + // Deletes a long-running operation. This method indicates that the client is + // no longer interested in the operation result. It does not cancel the + // operation. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) + // Waits until the specified long-running operation is done or reaches at most + // a specified timeout, returning the latest state. If the operation is + // already done, the latest state is immediately returned. If the timeout + // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + // timeout is used. If the server does not support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + // Note that this method is on a best-effort basis. It may return the latest + // state before the specified timeout (including immediately), meaning even an + // immediate response is no guarantee that the operation is done. + WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error) +} + +// UnimplementedOperationsServer can be embedded to have forward compatible implementations. 
+type UnimplementedOperationsServer struct { +} + +func (*UnimplementedOperationsServer) ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListOperations not implemented") +} +func (*UnimplementedOperationsServer) GetOperation(context.Context, *GetOperationRequest) (*Operation, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetOperation not implemented") +} +func (*UnimplementedOperationsServer) DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method DeleteOperation not implemented") +} +func (*UnimplementedOperationsServer) CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CancelOperation not implemented") +} +func (*UnimplementedOperationsServer) WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error) { + return nil, status1.Errorf(codes.Unimplemented, "method WaitOperation not implemented") +} + +func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) { + s.RegisterService(&_Operations_serviceDesc, srv) +} + +func _Operations_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).ListOperations(ctx, req.(*ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).GetOperation(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).DeleteOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/DeleteOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).DeleteOperation(ctx, req.(*DeleteOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(OperationsServer).CancelOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/CancelOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).CancelOperation(ctx, req.(*CancelOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Operations_WaitOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperationsServer).WaitOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.longrunning.Operations/WaitOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperationsServer).WaitOperation(ctx, req.(*WaitOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Operations_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.longrunning.Operations", + HandlerType: (*OperationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListOperations", + Handler: _Operations_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _Operations_GetOperation_Handler, + }, + { + MethodName: "DeleteOperation", + Handler: _Operations_DeleteOperation_Handler, + }, + { + MethodName: "CancelOperation", + Handler: _Operations_CancelOperation_Handler, + }, + { + MethodName: "WaitOperation", + Handler: _Operations_WaitOperation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/longrunning/operations.proto", +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go new file mode 100644 index 000000000..3be65a155 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -0,0 +1,875 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package longrunning + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newOperationsClientHook clientHook + +// OperationsCallOptions contains the retry settings for each method of OperationsClient. 
+type OperationsCallOptions struct { + ListOperations []gax.CallOption + GetOperation []gax.CallOption + DeleteOperation []gax.CallOption + CancelOperation []gax.CallOption + WaitOperation []gax.CallOption +} + +func defaultOperationsGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("longrunning.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultOperationsCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + WaitOperation: []gax.CallOption{}, + } +} + +func defaultOperationsRESTCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithTimeout(10000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + WaitOperation: 
[]gax.CallOption{}, + } +} + +// internalOperationsClient is an interface that defines the methods available from Long Running Operations API. +type internalOperationsClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) + DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error + CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error + WaitOperation(context.Context, *longrunningpb.WaitOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// OperationsClient is a client for interacting with Long Running Operations API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +type OperationsClient struct { + // The internal transport-dependent client. + internalClient internalOperationsClient + + // The call options for this service. + CallOptions *OperationsCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *OperationsClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *OperationsClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. +func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + return c.internalClient.ListOperations(ctx, req, opts...) 
+} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteOperation(ctx, req, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.CancelOperation(ctx, req, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.WaitOperation(ctx, req, opts...) +} + +// operationsGRPCClient is a client for interacting with Long Running Operations API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions + + // The gRPC API client. + operationsClient longrunningpb.OperationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewOperationsClient creates a new operations client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Manages long-running operations with an API service. 
+// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := defaultOperationsGRPCClientOptions() + if newOperationsClientHook != nil { + hookOpts, err := newOperationsClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := OperationsClient{CallOptions: defaultOperationsCallOptions()} + + c := &operationsGRPCClient{ + connPool: connPool, + operationsClient: longrunningpb.NewOperationsClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *operationsGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions +} + +// NewOperationsRESTClient creates a new operations rest client. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := append(defaultOperationsRESTClientOptions(), opts...) 
+ httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultOperationsRESTCallOptions() + c := &operationsRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultOperationsRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"), + internaloption.WithDefaultEndpointTemplate("https://longrunning.UNIVERSE_DOMAIN"), + internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *operationsRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.WaitOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. 
+// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. +func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) 
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName()) + + // Build HTTP headers from client and context metadata. 
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} + + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("") + + params := url.Values{} + if req.GetName() != "" { + params.Add("name", fmt.Sprintf("%v", req.GetName())) + } + if req.GetTimeout() != nil { + field, err := protojson.Marshal(req.GetTimeout()) + if err != nil { + return nil, err + } + params.Add("timeout", string(field[1:len(field)-1])) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := io.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return err + } + + return nil + }, opts...) 
+    if e != nil {
+        return nil, e
+    }
+    return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go
new file mode 100644
index 000000000..3c75b761e
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/longrunning.go
@@ -0,0 +1,182 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+// See google.golang.org/genproto/googleapis/longrunning for its service definition.
+//
+// Users of the Google Cloud Libraries will typically not use this package directly.
+// Instead they will call functions returning Operations and call their methods.
+//
+// This package is still experimental and subject to change.
+package longrunning // import "cloud.google.com/go/longrunning"
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "time"
+
+    autogen "cloud.google.com/go/longrunning/autogen"
+    pb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+    gax "github.com/googleapis/gax-go/v2"
+    "github.com/googleapis/gax-go/v2/apierror"
+    "google.golang.org/grpc/status"
+    "google.golang.org/protobuf/proto"
+    "google.golang.org/protobuf/protoadapt"
+    "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
+var ErrNoMetadata = errors.New("operation contains no metadata")
+
+// Operation represents the result of an API call that may not be ready yet.
+type Operation struct {
+    c operationsClient
+    proto *pb.Operation
+}
+
+type operationsClient interface {
+    GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
+    CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
+    DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
+}
+
+// InternalNewOperation is for use by the google Cloud Libraries only.
+//
+// InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation.
+// The conn parameter refers to a server that proto was received from.
+func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
+    return &Operation{
+        c: inner,
+        proto: proto,
+    }
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service
+// from which the operation is created.
+func (op *Operation) Name() string {
+    return op.proto.Name
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *Operation) Done() bool {
+    return op.proto.Done
+}
+
+// Metadata unmarshals op's metadata into meta.
+// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
+func (op *Operation) Metadata(meta protoadapt.MessageV1) error {
+    if m := op.proto.Metadata; m != nil {
+        metav2 := protoadapt.MessageV2Of(meta)
+        return anypb.UnmarshalTo(m, metav2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
+    }
+    return ErrNoMetadata
+}
+
+// Poll fetches the latest state of a long-running operation.
+//
+// If Poll fails, the error is returned and op is unmodified.
+// If Poll succeeds and the operation has completed with failure,
+// the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true; if resp != nil, the response of the operation
+// is stored in resp.
+func (op *Operation) Poll(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
+    if !op.Done() {
+        p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...)
+        if err != nil {
+            return err
+        }
+        op.proto = p
+    }
+    if !op.Done() {
+        return nil
+    }
+
+    switch r := op.proto.Result.(type) {
+    case *pb.Operation_Error:
+        err, _ := apierror.FromError(status.ErrorProto(r.Error))
+        return err
+    case *pb.Operation_Response:
+        if resp == nil {
+            return nil
+        }
+        respv2 := protoadapt.MessageV2Of(resp)
+        return anypb.UnmarshalTo(r.Response, respv2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
+    default:
+        return fmt.Errorf("unsupported result type %[1]T: %[1]v", r)
+    }
+}
+
+// DefaultWaitInterval is the polling interval used by Operation.Wait.
+const DefaultWaitInterval = 60 * time.Second
+
+// Wait is equivalent to WaitWithInterval using DefaultWaitInterval.
+func (op *Operation) Wait(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
+    return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...)
+}
+
+// WaitWithInterval blocks until the operation is completed.
+// If resp != nil, Wait stores the response in resp.
+// WaitWithInterval polls every interval, except initially
+// when it polls using exponential backoff.
+//
+// See documentation of Poll for error-handling information.
+func (op *Operation) WaitWithInterval(ctx context.Context, resp protoadapt.MessageV1, interval time.Duration, opts ...gax.CallOption) error {
+    bo := gax.Backoff{
+        Initial: 1 * time.Second,
+        Max: interval,
+    }
+    if bo.Max < bo.Initial {
+        bo.Max = bo.Initial
+    }
+    return op.wait(ctx, resp, &bo, gax.Sleep, opts...)
+}
+
+type sleeper func(context.Context, time.Duration) error
+
+// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing.
+func (op *Operation) wait(ctx context.Context, resp protoadapt.MessageV1, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error {
+    for {
+        if err := op.Poll(ctx, resp, opts...); err != nil {
+            return err
+        }
+        if op.Done() {
+            return nil
+        }
+        if err := sl(ctx, bo.Pause()); err != nil {
+            return err
+        }
+    }
+}
+
+// Cancel starts asynchronous cancellation on a long-running operation. The server
+// makes a best effort to cancel the operation, but success is not
+// guaranteed. If the server doesn't support this method, it returns
+// status.Code(err) == codes.Unimplemented. Clients can use
+// Poll or other methods to check whether the cancellation succeeded or whether the
+// operation completed despite cancellation. On successful cancellation,
+// the operation is not deleted; instead, op.Poll returns an error
+// with code Canceled.
+func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error {
+    return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...)
+}
+
+// Delete deletes a long-running operation. This method indicates that the client is
+// no longer interested in the operation result. It does not cancel the
+// operation. If the server doesn't support this method, status.Code(err) == codes.Unimplemented.
+func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error {
+    return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...)
+}
diff --git a/vendor/cloud.google.com/go/longrunning/tidyfix.go b/vendor/cloud.google.com/go/longrunning/tidyfix.go
new file mode 100644
index 000000000..d9a07f99e
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/tidyfix.go
@@ -0,0 +1,23 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the {{.RootMod}} import, won't actually become part of
+// the resultant binary.
+//go:build modhack
+// +build modhack
+
+package longrunning
+
+// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "cloud.google.com/go"
diff --git a/vendor/cloud.google.com/go/migration.md b/vendor/cloud.google.com/go/migration.md
new file mode 100644
index 000000000..224dcfa13
--- /dev/null
+++ b/vendor/cloud.google.com/go/migration.md
@@ -0,0 +1,50 @@
+# go-genproto to google-cloud-go message type migration
+
+The message types for all of our client libraries are being migrated from the
+`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto)
+to their respective product specific module in this repository. For example
+this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest)
+can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest).
+
+Although the type definitions have moved, aliases have been left in the old
+genproto packages to ensure a smooth non-breaking transition.
+
+## How do I migrate to the new packages?
+
+The easiest option is to run a migration tool at the root of our project. It is
+like `go fix`, but specifically for this migration. Before running the tool it
+is best to make sure any modules that have the prefix of `cloud.google.com/go`
+are up to date. To run the tool, do the following:
+
+```bash
+go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest .
+go mod tidy
+```
+
+The tool should only change up to one line in the import statement per file.
+This can also be done by hand if you prefer.
+
+## Do I have to migrate?
+
+Yes if you wish to keep using the newest versions of our client libraries with
+the newest features -- You should migrate by the start of 2023. Until then we
+will keep updating the aliases in go-genproto weekly. If you have an existing
+workload that uses these client libraries and does not need to update its
+dependencies there is no action to take. All existing written code will continue
+to work.
+
+## Why are these types being moved
+
+1. This change will help simplify dependency trees over time.
+2. The types will now be in product specific modules that are versioned
+   independently with semver. This is especially a benefit for users that rely
+   on multiple clients in a single application. Because message types are no
+   longer mono-packaged users are less likely to run into intermediate
+   dependency conflicts when updating dependencies.
+3. Having all these types in one repository will help us ensure that unintended
+   changes are caught before they would be released.
+
+## Have questions?
+
+Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help)
+if you have any questions or concerns.
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json
new file mode 100644
index 000000000..3dacbc5e6
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config-individual.json
@@ -0,0 +1,60 @@
+{
+  "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
+  "release-type": "go-yoshi",
+  "include-component-in-tag": true,
+  "separate-pull-requests": true,
+  "tag-separator": "/",
+  "packages": {
+    "ai": {
+      "component": "ai"
+    },
+    "aiplatform": {
+      "component": "aiplatform"
+    },
+    "auth": {
+      "component": "auth"
+    },
+    "auth/oauth2adapt": {
+      "component": "auth/oauth2adapt"
+    },
+    "bigquery": {
+      "component": "bigquery"
+    },
+    "bigtable": {
+      "component": "bigtable"
+    },
+    "datastore": {
+      "component": "datastore"
+    },
+    "errorreporting": {
+      "component": "errorreporting"
+    },
+    "firestore": {
+      "component": "firestore"
+    },
+    "logging": {
+      "component": "logging"
+    },
+    "profiler": {
+      "component": "profiler"
+    },
+    "pubsub": {
+      "component": "pubsub"
+    },
+    "pubsublite": {
+      "component": "pubsublite"
+    },
+    "spanner": {
+      "component": "spanner"
+    },
+    "storage": {
+      "component": "storage"
+    },
+    "vertexai": {
+      "component": "vertexai"
+    }
+  },
+  "plugins": [
+    "sentence-case"
+  ]
+}
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
new file mode 100644
index 000000000..1e924a834
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -0,0 +1,451 @@
+{
+  "release-type": "go-yoshi",
+  "include-component-in-tag": true,
+  "tag-separator": "/",
+  "packages": {
+    "accessapproval": {
+      "component": "accessapproval"
+    },
+    "accesscontextmanager": {
+      "component": "accesscontextmanager"
+    },
+    "advisorynotifications": {
+      "component": "advisorynotifications"
+    },
+    "alloydb": {
+      "component": "alloydb"
+    },
+    "analytics": {
+      "component": "analytics"
+    },
+    "apigateway": {
+      "component": "apigateway"
+    },
+    "apigeeconnect": {
+      "component": "apigeeconnect"
+    },
+    "apigeeregistry": {
+      "component": "apigeeregistry"
+    },
+    "apikeys": {
+      "component": "apikeys"
+    },
+    "appengine": {
+      "component": "appengine"
+    },
+    "apphub": {
+      "component": "apphub"
+    },
+    "apps": {
+      "component": "apps"
+    },
+    "area120": {
+      "component":
"area120" + }, + "artifactregistry": { + "component": "artifactregistry" + }, + "asset": { + "component": "asset" + }, + "assuredworkloads": { + "component": "assuredworkloads" + }, + "automl": { + "component": "automl" + }, + "backupdr": { + "component": "backupdr" + }, + "baremetalsolution": { + "component": "baremetalsolution" + }, + "batch": { + "component": "batch" + }, + "beyondcorp": { + "component": "beyondcorp" + }, + "billing": { + "component": "billing" + }, + "binaryauthorization": { + "component": "binaryauthorization" + }, + "certificatemanager": { + "component": "certificatemanager" + }, + "channel": { + "component": "channel" + }, + "chat": { + "component": "chat" + }, + "cloudbuild": { + "component": "cloudbuild" + }, + "cloudcontrolspartner": { + "component": "cloudcontrolspartner" + }, + "clouddms": { + "component": "clouddms" + }, + "cloudprofiler": { + "component": "cloudprofiler" + }, + "cloudquotas": { + "component": "cloudquotas" + }, + "cloudtasks": { + "component": "cloudtasks" + }, + "commerce": { + "component": "commerce" + }, + "compute": { + "component": "compute" + }, + "compute/metadata": { + "component": "compute/metadata" + }, + "confidentialcomputing": { + "component": "confidentialcomputing" + }, + "config": { + "component": "config" + }, + "contactcenterinsights": { + "component": "contactcenterinsights" + }, + "container": { + "component": "container" + }, + "containeranalysis": { + "component": "containeranalysis" + }, + "datacatalog": { + "component": "datacatalog" + }, + "dataflow": { + "component": "dataflow" + }, + "dataform": { + "component": "dataform" + }, + "datafusion": { + "component": "datafusion" + }, + "datalabeling": { + "component": "datalabeling" + }, + "dataplex": { + "component": "dataplex" + }, + "dataproc": { + "component": "dataproc" + }, + "dataqna": { + "component": "dataqna" + }, + "datastream": { + "component": "datastream" + }, + "deploy": { + "component": "deploy" + }, + "developerconnect": { + "component": "developerconnect" + }, + "dialogflow": { + "component": "dialogflow" + }, + "discoveryengine": { + "component": "discoveryengine" + }, + "dlp": { + "component": "dlp" + }, + "documentai": { + "component": "documentai" + }, + "domains": { + "component": "domains" + }, + "edgecontainer": { + "component": "edgecontainer" + }, + "edgenetwork": { + "component": "edgenetwork" + }, + "essentialcontacts": { + "component": "essentialcontacts" + }, + "eventarc": { + "component": "eventarc" + }, + "filestore": { + "component": "filestore" + }, + "functions": { + "component": "functions" + }, + "gkebackup": { + "component": "gkebackup" + }, + "gkeconnect": { + "component": "gkeconnect" + }, + "gkehub": { + "component": "gkehub" + }, + "gkemulticloud": { + "component": "gkemulticloud" + }, + "grafeas": { + "component": "grafeas" + }, + "gsuiteaddons": { + "component": "gsuiteaddons" + }, + "iam": { + "component": "iam" + }, + "iap": { + "component": "iap" + }, + "identitytoolkit": { + "component": "identitytoolkit" + }, + "ids": { + "component": "ids" + }, + "iot": { + "component": "iot" + }, + "kms": { + "component": "kms" + }, + "language": { + "component": "language" + }, + "lifesciences": { + "component": "lifesciences" + }, + "longrunning": { + "component": "longrunning" + }, + "managedidentities": { + "component": "managedidentities" + }, + "managedkafka": { + "component": "managedkafka" + }, + "maps": { + "component": "maps" + }, + "mediatranslation": { + "component": "mediatranslation" + }, + "memcache": { + "component": 
"memcache" + }, + "metastore": { + "component": "metastore" + }, + "migrationcenter": { + "component": "migrationcenter" + }, + "monitoring": { + "component": "monitoring" + }, + "netapp": { + "component": "netapp" + }, + "networkconnectivity": { + "component": "networkconnectivity" + }, + "networkmanagement": { + "component": "networkmanagement" + }, + "networksecurity": { + "component": "networksecurity" + }, + "networkservices": { + "component": "networkservices" + }, + "notebooks": { + "component": "notebooks" + }, + "optimization": { + "component": "optimization" + }, + "orchestration": { + "component": "orchestration" + }, + "orgpolicy": { + "component": "orgpolicy" + }, + "osconfig": { + "component": "osconfig" + }, + "oslogin": { + "component": "oslogin" + }, + "parallelstore": { + "component": "parallelstore" + }, + "phishingprotection": { + "component": "phishingprotection" + }, + "policysimulator": { + "component": "policysimulator" + }, + "policytroubleshooter": { + "component": "policytroubleshooter" + }, + "privatecatalog": { + "component": "privatecatalog" + }, + "privilegedaccessmanager": { + "component": "privilegedaccessmanager" + }, + "rapidmigrationassessment": { + "component": "rapidmigrationassessment" + }, + "recaptchaenterprise": { + "component": "recaptchaenterprise" + }, + "recommendationengine": { + "component": "recommendationengine" + }, + "recommender": { + "component": "recommender" + }, + "redis": { + "component": "redis" + }, + "resourcemanager": { + "component": "resourcemanager" + }, + "resourcesettings": { + "component": "resourcesettings" + }, + "retail": { + "component": "retail" + }, + "run": { + "component": "run" + }, + "scheduler": { + "component": "scheduler" + }, + "secretmanager": { + "component": "secretmanager" + }, + "securesourcemanager": { + "component": "securesourcemanager" + }, + "security": { + "component": "security" + }, + "securitycenter": { + "component": "securitycenter" + }, + "securitycentermanagement": { + "component": "securitycentermanagement" + }, + "securityposture": { + "component": "securityposture" + }, + "servicecontrol": { + "component": "servicecontrol" + }, + "servicedirectory": { + "component": "servicedirectory" + }, + "servicehealth": { + "component": "servicehealth" + }, + "servicemanagement": { + "component": "servicemanagement" + }, + "serviceusage": { + "component": "serviceusage" + }, + "shell": { + "component": "shell" + }, + "shopping": { + "component": "shopping" + }, + "speech": { + "component": "speech" + }, + "storageinsights": { + "component": "storageinsights" + }, + "storagetransfer": { + "component": "storagetransfer" + }, + "streetview": { + "component": "streetview" + }, + "support": { + "component": "support" + }, + "talent": { + "component": "talent" + }, + "telcoautomation": { + "component": "telcoautomation" + }, + "texttospeech": { + "component": "texttospeech" + }, + "tpu": { + "component": "tpu" + }, + "trace": { + "component": "trace" + }, + "translate": { + "component": "translate" + }, + "video": { + "component": "video" + }, + "videointelligence": { + "component": "videointelligence" + }, + "vision": { + "component": "vision" + }, + "visionai": { + "component": "visionai" + }, + "vmmigration": { + "component": "vmmigration" + }, + "vmwareengine": { + "component": "vmwareengine" + }, + "vpcaccess": { + "component": "vpcaccess" + }, + "webrisk": { + "component": "webrisk" + }, + "websecurityscanner": { + "component": "websecurityscanner" + }, + "workflows": { + "component": "workflows" + 
}, + "workstations": { + "component": "workstations" + } + }, + "plugins": [ + "sentence-case" + ] +} diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json new file mode 100644 index 000000000..1400245b8 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config.json @@ -0,0 +1,11 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + ".": { + "component": "main" + } + }, + "plugins": ["sentence-case"] +} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md new file mode 100644 index 000000000..e9fb55585 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -0,0 +1,602 @@ +# Changes + + +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03) + + +### Features + +* **storage/transfermanager:** Add DownloadDirectory ([#10430](https://github.com/googleapis/google-cloud-go/issues/10430)) ([0d0e5dd](https://github.com/googleapis/google-cloud-go/commit/0d0e5dd5214769cc2c197991c2ece1303bd600de)) +* **storage/transfermanager:** Automatically shard downloads ([#10379](https://github.com/googleapis/google-cloud-go/issues/10379)) ([05816f9](https://github.com/googleapis/google-cloud-go/commit/05816f9fafd3132c371da37f3a879bb9e8e7e604)) + + +### Bug Fixes + +* **storage/transfermanager:** WaitAndClose waits for Callbacks to finish ([#10504](https://github.com/googleapis/google-cloud-go/issues/10504)) ([0e81002](https://github.com/googleapis/google-cloud-go/commit/0e81002b3a5e560c874d814d28a35a102311d9ef)), refs [#10502](https://github.com/googleapis/google-cloud-go/issues/10502) +* **storage:** Allow empty soft delete on Create ([#10394](https://github.com/googleapis/google-cloud-go/issues/10394)) ([d8bd2c1](https://github.com/googleapis/google-cloud-go/commit/d8bd2c1ffc4f27503a74ded438d8bfbdd7707c63)), refs [#10380](https://github.com/googleapis/google-cloud-go/issues/10380) +* **storage:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) +* **storage:** Retry broken pipe error ([#10374](https://github.com/googleapis/google-cloud-go/issues/10374)) ([2f4daa1](https://github.com/googleapis/google-cloud-go/commit/2f4daa11acf9d3f260fa888333090359c4d9198e)), refs [#9178](https://github.com/googleapis/google-cloud-go/issues/9178) + + +### Documentation + +* **storage/control:** Remove allowlist note from Folders RPCs ([d6c543c](https://github.com/googleapis/google-cloud-go/commit/d6c543c3969016c63e158a862fc173dff60fb8d9)) + +## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10) + + +### Features + +* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. 
([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71)) +* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146) +* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05)) + + +### Bug Fixes + +* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf)) + + +### Documentation + +* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd)) + +## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13) + + +### Features + +* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7)) +* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91)) + + +### Bug Fixes + +* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289)) +* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90)) +* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4)) +* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5)) +* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705) +* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) ([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478) +* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs [#9720](https://github.com/googleapis/google-cloud-go/issues/9720) + + +### Documentation + +* **storage/control:** 
Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c)) + +## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) + + +### Features + +* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3)) +* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330)) + + +### Bug Fixes + +* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9)) +* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + + +### Performance Improvements + +* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca)) + +## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11) + + +### Bug Fixes + +* **storage:** Add object validation case and test ([#9521](https://github.com/googleapis/google-cloud-go/issues/9521)) ([386bef3](https://github.com/googleapis/google-cloud-go/commit/386bef319b4678beaa926ddfe4edef190f11b68d)) + +## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.38.0...storage/v1.39.0) (2024-02-29) + + +### Features + +* **storage:** Make it possible to disable Content-Type sniffing ([#9431](https://github.com/googleapis/google-cloud-go/issues/9431)) ([0676670](https://github.com/googleapis/google-cloud-go/commit/067667058c06689b64401be11858d84441584039)) + +## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.37.0...storage/v1.38.0) (2024-02-12) + + +### Features + +* **storage:** Support auto-detection of access ID for external_account creds ([#9208](https://github.com/googleapis/google-cloud-go/issues/9208)) ([b958d44](https://github.com/googleapis/google-cloud-go/commit/b958d44589f2b6b226ea3bef23829ac75a0aa6a6)) +* **storage:** Support custom hostname for VirtualHostedStyle SignedURLs ([#9348](https://github.com/googleapis/google-cloud-go/issues/9348)) ([7eec40e](https://github.com/googleapis/google-cloud-go/commit/7eec40e4cf82c53e5bf02bd2c14e0b25043da6d0)) +* **storage:** Support universe domains ([#9344](https://github.com/googleapis/google-cloud-go/issues/9344)) ([29a7498](https://github.com/googleapis/google-cloud-go/commit/29a7498b8eb0d00fdb5acd7ee8ce0e5a2a8c11ce)) + + +### Bug Fixes + +* **storage:** Fix v4 url signing for hosts that specify ports ([#9347](https://github.com/googleapis/google-cloud-go/issues/9347)) ([f127b46](https://github.com/googleapis/google-cloud-go/commit/f127b4648f861c1ba44f41a280a62652620c04c2)) + + +### Documentation + +* **storage:** Indicate that gRPC is incompatible with universe domains ([#9386](https://github.com/googleapis/google-cloud-go/issues/9386)) 
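The 1.40.0 entry above makes `Reader` implement `io.WriterTo`, which lets `io.Copy` stream an object straight into a file rather than buffering it in the caller. A minimal sketch under assumed placeholder names ("my-bucket", "my-object", local file "object.copy"):

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("object.copy")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// io.Copy detects that rc implements io.WriterTo and delegates to it,
	// skipping io.Copy's own intermediate buffer.
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}
```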
([e8bd85b](https://github.com/googleapis/google-cloud-go/commit/e8bd85bbce12d5f7ab87fa49d166a6a0d84bd12d)) + +## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) + + +### Features + +* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) +* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) + + +### Bug Fixes + +* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) + +## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) + + +### Features + +* **storage:** Add object retention feature ([#9072](https://github.com/googleapis/google-cloud-go/issues/9072)) ([16ecfd1](https://github.com/googleapis/google-cloud-go/commit/16ecfd150ff1982f03d207a80a82e934d1013874)) + + +### Bug Fixes + +* **storage:** Do not inhibit the dead code elimination. ([#8543](https://github.com/googleapis/google-cloud-go/issues/8543)) ([ca2493f](https://github.com/googleapis/google-cloud-go/commit/ca2493f43c299bbaed5f7e5b70f66cc763ff9802)) +* **storage:** Set flush and get_state to false on the last write in gRPC ([#9013](https://github.com/googleapis/google-cloud-go/issues/9013)) ([c1e9fe5](https://github.com/googleapis/google-cloud-go/commit/c1e9fe5f4166a71e55814ccf126926ec0e0e7945)) + +## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09) + + +### Bug Fixes + +* **storage:** Rename aux.go to auxiliary.go fixing windows build ([ba23673](https://github.com/googleapis/google-cloud-go/commit/ba23673da7707c31292e4aa29d65b7ac1446d4a6)) + +## [1.35.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.1...storage/v1.35.0) (2023-11-09) + + +### Features + +* **storage:** Change gRPC writes to use bi-directional streams ([#8930](https://github.com/googleapis/google-cloud-go/issues/8930)) ([3e23a36](https://github.com/googleapis/google-cloud-go/commit/3e23a364b1a20c4fda7aef257e4136586ec769a4)) + +## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.0...storage/v1.34.1) (2023-11-01) + + +### Bug Fixes + +* **storage:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.33.0...storage/v1.34.0) (2023-10-31) + + +### Features + +* **storage/internal:** Add match_glob field to ListObjectsRequest ([#8618](https://github.com/googleapis/google-cloud-go/issues/8618)) ([e9ae601](https://github.com/googleapis/google-cloud-go/commit/e9ae6018983ae09781740e4ff939e6e365863dbb)) +* **storage/internal:** Add terminal_storage_class fields to Autoclass message ([57fc1a6](https://github.com/googleapis/google-cloud-go/commit/57fc1a6de326456eb68ef25f7a305df6636ed386)) +* **storage/internal:** Adds the RestoreObject operation 
([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0)) +* **storage:** Support autoclass v2.1 ([#8721](https://github.com/googleapis/google-cloud-go/issues/8721)) ([fe1e195](https://github.com/googleapis/google-cloud-go/commit/fe1e19590a252c6adc6ca6c51a69b6e561e143b8)) +* **storage:** Support MatchGlob for gRPC ([#8670](https://github.com/googleapis/google-cloud-go/issues/8670)) ([3df0287](https://github.com/googleapis/google-cloud-go/commit/3df0287f88d5e2c4526e9e6b8dc2a4ca54f88918)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) + + +### Bug Fixes + +* **storage:** Drop stream reference after closing it for gRPC writes ([#8872](https://github.com/googleapis/google-cloud-go/issues/8872)) ([525abde](https://github.com/googleapis/google-cloud-go/commit/525abdee433864d4d456f1f1fff5599017b557ff)) +* **storage:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **storage:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **storage:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07) + + +### Features + +* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a)) + +## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15) + + +### Features + +* **storage:** Add support for custom headers ([#8294](https://github.com/googleapis/google-cloud-go/issues/8294)) ([313fd4a](https://github.com/googleapis/google-cloud-go/commit/313fd4a60380d36c5ecaead3e968dbc84d044a0b)) +* **storage:** Add trace span to Writer ([#8375](https://github.com/googleapis/google-cloud-go/issues/8375)) ([f7ac85b](https://github.com/googleapis/google-cloud-go/commit/f7ac85bec2806d351529714bd7744a91a9fdefdd)), refs [#6144](https://github.com/googleapis/google-cloud-go/issues/6144) +* **storage:** Support single-shot uploads in gRPC ([#8348](https://github.com/googleapis/google-cloud-go/issues/8348)) ([7de4a7d](https://github.com/googleapis/google-cloud-go/commit/7de4a7da31ab279a343b1592b15a126cda03e5e7)), refs [#7798](https://github.com/googleapis/google-cloud-go/issues/7798) +* **storage:** Trace span covers life of a Reader ([#8390](https://github.com/googleapis/google-cloud-go/issues/8390)) ([8de30d7](https://github.com/googleapis/google-cloud-go/commit/8de30d752eec2fed2ea4c127482d3e213f9050e2)) + + +### Bug Fixes + +* **storage:** Fix AllObjects condition in gRPC ([#8184](https://github.com/googleapis/google-cloud-go/issues/8184)) ([2b99e4f](https://github.com/googleapis/google-cloud-go/commit/2b99e4f39be20fe21e8bc5c1ec1c0e758222c46e)), refs [#6205](https://github.com/googleapis/google-cloud-go/issues/6205) +* **storage:** Fix gRPC generation/condition issues ([#8396](https://github.com/googleapis/google-cloud-go/issues/8396)) ([ca68ff5](https://github.com/googleapis/google-cloud-go/commit/ca68ff54b680732b59b223655070d0f6abccefee)) +* **storage:** Same method name and Trace Span name ([#8150](https://github.com/googleapis/google-cloud-go/issues/8150)) 
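The 1.33.0 entry exports the gRPC client constructor. A hedged sketch of opting into the gRPC transport; aside from the constructor, the handle-based API is used the same way as with the default HTTP client ("my-bucket" and "my-object" are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// NewGRPCClient returns a *storage.Client backed by the gRPC API;
	// Bucket, Object, Reader, and Writer are used exactly as before.
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	attrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(attrs.Size)
}
```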
([e277213](https://github.com/googleapis/google-cloud-go/commit/e2772133896bb94097b5d1f090f1bcafd136f2ed)) +* **storage:** Update gRPC retry codes ([#8202](https://github.com/googleapis/google-cloud-go/issues/8202)) ([afdf772](https://github.com/googleapis/google-cloud-go/commit/afdf772fc6a90b3010eee9d70ab65e22e276f53f)) + +## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.1...storage/v1.31.0) (2023-06-27) + + +### Features + +* **storage/internal:** Add ctype=CORD for ChecksummedData.content ([ca94e27](https://github.com/googleapis/google-cloud-go/commit/ca94e2724f9e2610b46aefd0a3b5ddc06102e91b)) +* **storage:** Add support for MatchGlob ([#8097](https://github.com/googleapis/google-cloud-go/issues/8097)) ([9426a5a](https://github.com/googleapis/google-cloud-go/commit/9426a5a45d4c2fd07f84261f6d602680e79cdc48)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) [#7728](https://github.com/googleapis/google-cloud-go/issues/7728) +* **storage:** Respect WithEndpoint for SignedURLs and PostPolicy ([#8113](https://github.com/googleapis/google-cloud-go/issues/8113)) ([f918f23](https://github.com/googleapis/google-cloud-go/commit/f918f23a3cda4fbc8d709e32b914ead8b735d664)) +* **storage:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + + +### Bug Fixes + +* **storage:** Fix CreateBucket logic for gRPC ([#8165](https://github.com/googleapis/google-cloud-go/issues/8165)) ([8424e7e](https://github.com/googleapis/google-cloud-go/commit/8424e7e145a117c91006318fa924a8b2643c1c7e)), refs [#8162](https://github.com/googleapis/google-cloud-go/issues/8162) +* **storage:** Fix reads with "./" in object names [XML] ([#8017](https://github.com/googleapis/google-cloud-go/issues/8017)) ([6b7b21f](https://github.com/googleapis/google-cloud-go/commit/6b7b21f8a334b6ad3a25e1f66ae1265b4d1f0995)) +* **storage:** Fix routing header for writes ([#8159](https://github.com/googleapis/google-cloud-go/issues/8159)) ([42a59f5](https://github.com/googleapis/google-cloud-go/commit/42a59f5a23ab9b4743ab032ad92304922c801d93)), refs [#8142](https://github.com/googleapis/google-cloud-go/issues/8142) [#8143](https://github.com/googleapis/google-cloud-go/issues/8143) [#8144](https://github.com/googleapis/google-cloud-go/issues/8144) [#8145](https://github.com/googleapis/google-cloud-go/issues/8145) [#8149](https://github.com/googleapis/google-cloud-go/issues/8149) +* **storage:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) +* **storage:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + + +### Documentation + +* **storage/internal:** Clarifications about behavior of DeleteObject RPC ([3f1ed9c](https://github.com/googleapis/google-cloud-go/commit/3f1ed9c63fb115f47607a3ab478842fe5ba0df11)) +* **storage/internal:** Clarified the behavior of supplying bucket.name field in CreateBucket to reflect actual implementation ([ebae64d](https://github.com/googleapis/google-cloud-go/commit/ebae64d53397ec5dfe851f098754eaa1f5df7cb1)) +* **storage/internal:** Revert ChecksummedData message definition not to specify ctype=CORD, because it would be a breaking change. 
([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage/internal:** Update routing annotations for CancelResumableWriteRequest and QueryWriteStatusRequest ([4900851](https://github.com/googleapis/google-cloud-go/commit/49008518e168fe6f7891b907d6fc14eecdef758c)) +* **storage/internal:** Updated ChecksummedData message definition to specify ctype=CORD, and removed incorrect earlier attempt that set that annotation in the ReadObjectResponse message definition ([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage:** WithXMLReads should mention XML instead of JSON API ([#7881](https://github.com/googleapis/google-cloud-go/issues/7881)) ([36f56c8](https://github.com/googleapis/google-cloud-go/commit/36f56c80c456ca74ffc03df76844ce15980ced82)) + +## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21) + + +### Bug Fixes + +* **storage:** Retract versions with Copier bug ([#7583](https://github.com/googleapis/google-cloud-go/issues/7583)) ([9c10b6f](https://github.com/googleapis/google-cloud-go/commit/9c10b6f8a54cb8447260148b5e4a9b5160281020)) + * Versions v1.25.0-v1.27.0 are retracted due to [#6857](https://github.com/googleapis/google-cloud-go/issues/6857). +* **storage:** SignedURL v4 allows headers with colons in value ([#7603](https://github.com/googleapis/google-cloud-go/issues/7603)) ([6b50f9b](https://github.com/googleapis/google-cloud-go/commit/6b50f9b368f5b271ade1706c342865cef46712e6)) + +## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.29.0...storage/v1.30.0) (2023-03-15) + + +### Features + +* **storage/internal:** Update routing annotation for CreateBucketRequest docs: Add support for end-to-end checksumming in the gRPC WriteObject flow feat!: BREAKING CHANGE - renaming Notification to NotificationConfig ([2fef56f](https://github.com/googleapis/google-cloud-go/commit/2fef56f75a63dc4ff6e0eea56c7b26d4831c8e27)) +* **storage:** Json downloads ([#7158](https://github.com/googleapis/google-cloud-go/issues/7158)) ([574a86c](https://github.com/googleapis/google-cloud-go/commit/574a86c614445f8c3f5a54446820df774c31cd47)) +* **storage:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + + +### Bug Fixes + +* **storage:** Specify credentials with STORAGE_EMULATOR_HOST ([#7271](https://github.com/googleapis/google-cloud-go/issues/7271)) ([940ae15](https://github.com/googleapis/google-cloud-go/commit/940ae15f725ff384e345e627feb03d22e1fd8db5)) + +## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) + + +### Features + +* **storage:** Add ComponentCount as part of ObjectAttrs ([#7230](https://github.com/googleapis/google-cloud-go/issues/7230)) ([a19bca6](https://github.com/googleapis/google-cloud-go/commit/a19bca60704b4fbb674cf51d828580aa653c8210)) +* **storage:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + + +### Documentation + +* **storage/internal:** Corrected typos and spellings ([7357077](https://github.com/googleapis/google-cloud-go/commit/735707796d81d7f6f32fc3415800c512fe62297e)) + +## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) + + +### Bug Fixes + +* **storage:** downgrade some 
dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) + + +### Features + +* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) +* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) + + +### Bug Fixes + +* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) +* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) + + +### Documentation + +* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) + +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) + + +### Features + +* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) + + +### Features + +* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) + + +### Bug Fixes + +* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) + + +### Features + +* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) +* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) + + +### Features + +* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) + + +### Features + +* 
**storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) +* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) + + +### Bug Fixes + +* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) + +### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) + + +### Bug Fixes + +* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) +* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) + +## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) + + +### Features + +* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) +* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) + + +### Bug Fixes + +* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) + +## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) + + +### Features + +* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) +* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) + +## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) + + +### Features + +* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) + +## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) + + +### Features + +* **storage:** add fully configurable and idempotency-aware retry strategy 
([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) + * This release contains changes to fully align this library's retry strategy + with best practices as described in the + Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). + * The library will now retry only idempotent operations by default. This means + that for certain operations, including object upload, compose, rewrite, + update, and delete, requests will not be retried by default unless + [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) + for the request have been met. + * The library now has methods to configure aspects of retry policy for + API calls, including which errors are retried, the timing of the + exponential backoff, and how idempotency is taken into account. + * If you wish to re-enable retries for a non-idempotent request, use the + [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) + policy. 
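As a concrete illustration of the options described in these bullets, the sketch below attaches a custom backoff and a RetryAlways policy to a single object handle before an upload. The bucket and object names and the backoff values are placeholders, not recommendations:

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Uploads without preconditions are not retried by default.
	// WithPolicy(RetryAlways) re-enables retries for this handle only,
	// and WithBackoff tunes the exponential backoff described above.
	o := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithBackoff(gax.Backoff{
			Initial:    2 * time.Second,
			Max:        30 * time.Second,
			Multiplier: 3,
		}),
		storage.WithPolicy(storage.RetryAlways),
	)

	w := o.NewWriter(ctx)
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```

RetryAlways is only appropriate when the caller knows the request is safe to repeat, for example because a generation precondition is set; otherwise the idempotency-aware default is the safer choice.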
+ * For full details on how to configure retries, see the + [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) + and the + [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) +* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) +* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) +* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) + +### Bug Fixes + +* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) + +### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) + + +### Bug Fixes + +* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) + +### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) + + +### Bug Fixes + +* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) + +## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) + + +### Features + +* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) +* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) + + +### Bug Fixes + +* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) + +## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) + + +### Features + +* **storage:** add projectNumber field to bucketAttrs. 
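The v1.19.0 and v1.18.0 entries above let SignedURL and PostPolicyV4 sign with the client's existing credentials instead of an explicitly supplied GoogleAccessID and private key. A rough sketch using BucketHandle.SignedURL, assuming the client's credentials are able to sign (for example, a service account) and placeholder bucket and object names:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// GoogleAccessID and PrivateKey are omitted; the handle falls back to
	// the credentials the client was constructed with.
	url, err := client.Bucket("my-bucket").SignedURL("my-object", &storage.SignedURLOptions{
		Scheme:  storage.SigningSchemeV4,
		Method:  http.MethodGet,
		Expires: time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}
```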
([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) + + +### Bug Fixes + +* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) + +### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) + + +### Bug Fixes + +* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) +* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) +* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) +* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) +* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) +* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) + +## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) + + +### Features + +* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) + + +### Bug Fixes + +* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) +* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) + +## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) + + +### Features + +* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 + config updates (see 
[googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). + +### Bug Fixes + +* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) + + +## v1.14.0 + +- Updates to various dependencies. + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) + + +### Features + +* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) +* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) + + +### Bug Fixes + +* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) + +## v1.12.0 +- V4 signed URL fixes: + - Fix encoding of spaces in query parameters. + - Add fields that were missing from PostPolicyV4 policy conditions. +- Fix Query to correctly list prefixes as well as objects when SetAttrSelection + is used. + +## v1.11.0 +- Add support for CustomTime and NoncurrentTime object lifecycle management + features. + +## v1.10.0 +- Bump dependency on google.golang.org/api to capture changes to retry logic + which will make retries on writes more resilient. +- Improve documentation for Writer.ChunkSize. +- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. + +## v1.9.0 +- Add retry for transient network errors on most operations (with the exception + of writes). +- Bump dependency for google.golang.org/api to capture a change in the default + HTTP transport which will improve performance for reads under heavy load. +- Add CRC32C checksum validation option to Composer. + +## v1.8.0 +- Add support for V4 signed post policies. + +## v1.7.0 +- V4 signed URL support: + - Add support for bucket-bound domains and virtual hosted style URLs. + - Add support for query parameters in the signature. + - Fix text encoding to align with standards. +- Add the object name to query parameters for write calls. +- Fix retry behavior when reading files with Content-Encoding gzip. +- Fix response header in reader. +- New code examples: + - Error handling for `ObjectHandle` preconditions. + - Existence checks for buckets and objects. + +## v1.6.0 + +- Updated option handling: + - Don't drop custom scopes (#1756) + - Don't drop port in provided endpoint (#1737) + +## v1.5.0 + +- Honor WithEndpoint client option for reads as well as writes. +- Add archive storage class to docs. +- Make fixes to storage benchwrapper. + +## v1.4.0 + +- When listing objects in a bucket, allow callers to specify which attributes + are queried. This allows for performance optimization. + +## v1.3.0 + +- Use `storage.googleapis.com/storage/v1` by default for GCS requests + instead of `www.googleapis.com/storage/v1`. + +## v1.2.1 + +- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not + being sent in all cases. + +## v1.2.0 + +- Add support for UniformBucketLevelAccess. 
This configures access checks + to use only bucket-level IAM policies. + See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. +- Fix userAgent to use correct version. + +## v1.1.2 + +- Fix memory leak in BucketIterator and ObjectIterator. + +## v1.1.1 + +- Send BucketPolicyOnly even when it's disabled. + +## v1.1.0 + +- Performance improvements for ObjectIterator and BucketIterator. +- Fix Bucket.ObjectIterator size calculation checks. +- Added HMACKeyOptions to all the methods which allows for options such as + UserProject to be set per invocation and optionally be used. + +## v1.0.0 + +This is the first tag to carve out storage as its own module. See: +https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/cloud.google.com/go/storage/LICENSE b/vendor/cloud.google.com/go/storage/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md new file mode 100644 index 000000000..b2f411210 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/README.md @@ -0,0 +1,32 @@ +## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) + +- [About Cloud Storage](https://cloud.google.com/storage/) +- [API documentation](https://cloud.google.com/storage/docs) +- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) + +### Example Usage + +First create a `storage.Client` to use throughout your application: + +[snip]:# (storage-1) +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +[snip]:# (storage-2) +```go +// Read the object1 from bucket. +rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := io.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 000000000..560a5605d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,345 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/storage/internal/apiv2/storagepb" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. 
+type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-", "user-", "group-", "group-", +// "domain-" and "project-team-". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a +// Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + EntityID string + Role ACLRole + Domain string + Email string + ProjectTeam *ProjectTeam +} + +// ProjectTeam is the project team associated with the entity, if any. +type ProjectTeam struct { + ProjectNumber string + Team string +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +// ACLHandle on an object operates on the latest generation of that object by default. +// Selecting a specific generation of an object is not currently supported by the client. +type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool + userProject string // for requester-pays buckets + retry *retryConfig +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the role for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectSet(ctx, entity, role, false) + } + if a.isDefault { + return a.objectSet(ctx, entity, role, true) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries. +func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) 
+} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + opts := makeStorageOpts(false, a.retry, a.userProject) + if isBucketDefault { + return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) + } + return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) +} + +func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRule(item)) + } + return rs +} + +func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRuleFromProto(item)) + } + return rs +} + +func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRule(item)) + } + return rs +} + +func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRuleFromProto(item)) + } + return rs +} + +func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toObjectProjectTeam(a.ProjectTeam), + } +} + +func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + +func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toBucketProjectTeam(a.ProjectTeam), + } +} + +func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.GetEntity()), + EntityID: a.GetEntityId(), + Role: ACLRole(a.GetRole()), + Domain: a.GetDomain(), + Email: a.GetEmail(), + ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), + } +} + +func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary + } + return r +} + +func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary + } + return r +} + +func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { + if 
len(rules) == 0 { + return nil + } + r := make([]*raw.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary + } + return r +} + +func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*storagepb.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toProtoBucketAccessControl()) + } + return r +} + +func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { + return &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { + return &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl { + return &storagepb.ObjectAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl { + return &storagepb.BucketAccessControl{ + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} + +func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.GetProjectNumber(), + Team: p.GetTeam(), + } +} + +func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 000000000..d582a60d0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,2388 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
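The acl.go file above is new to the vendor tree with this upgrade. For reviewers unfamiliar with the surface it exposes, here is a minimal sketch of how calling code could exercise ACLHandle; it is illustrative only, not part of the patch, and the bucket and object names are placeholders.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket") // placeholder bucket name

	// Grant READER on the bucket to all users.
	if err := bkt.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}

	// List the ACL entries of an object; the handle operates on the
	// latest generation of that object.
	rules, err := bkt.Object("example-object").ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		log.Printf("%s has role %s", r.Entity, r.Role)
	}
}
```

Note that ACL calls return errors on buckets with uniform bucket-level access enabled, which is why the sketch leaves that setting off.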
+ +package storage + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/storage/internal/apiv2/storagepb" + "google.golang.org/api/googleapi" + "google.golang.org/api/iamcredentials/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" + dpb "google.golang.org/genproto/googleapis/type/date" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" +) + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. +type BucketHandle struct { + c *Client + name string + acl ACLHandle + defaultObjectACL ACLHandle + conds *BucketConditions + userProject string // project for Requester Pays buckets + retry *retryConfig + enableObjectRetention *bool +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + retry := c.retry.clone() + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + retry: retry, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + retry: retry, + }, + retry: retry, + } +} + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + + o := makeStorageOpts(true, b.retry, b.userProject) + + if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil { + return err + } + return nil +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// BucketName returns the name of the bucket. +func (b *BucketHandle) BucketName() string { + return b.name +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations such as fetching the object or verifying its existence. +// Use methods on ObjectHandle to perform network operations. 
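As a quick orientation to the BucketHandle surface introduced above, the following sketch obtains a handle, creates a bucket with API defaults, and deletes it again. The project and bucket names are placeholders and error handling is deliberately minimal; this is a sketch, not part of the patch.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Bucket returns a handle without performing any network operations.
	bkt := client.Bucket("example-bucket") // placeholder name

	// Create with nil attrs uses the API defaults; project ID is a placeholder.
	if err := bkt.Create(ctx, "example-project", nil); err != nil {
		log.Fatal(err)
	}

	// Delete removes the (empty) bucket.
	if err := bkt.Delete(ctx); err != nil {
		log.Fatal(err)
	}
}
```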
+// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// +// https://cloud.google.com/storage/docs/naming-objects +func (b *BucketHandle) Object(name string) *ObjectHandle { + retry := b.retry.clone() + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + userProject: b.userProject, + retry: retry, + }, + gen: -1, + userProject: b.userProject, + retry: retry, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) +} + +// Update updates a bucket's attributes. +func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update") + defer func() { trace.EndSpan(ctx, err) }() + + isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 + o := makeStorageOpts(isIdempotent, b.retry, b.userProject) + return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) +} + +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a Google +// account or signing in. +// For more information about signed URLs, see "[Overview of access control]." +// +// This method requires the Method and Expires fields in the specified +// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and +// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] +// for this method. +// +// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { + // Make a copy of opts so we don't modify the pointer parameter. + newopts := opts.clone() + + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return SignedURL(b.name, object, newopts) + } + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return "", err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. + if len(newopts.PrivateKey) == 0 { + newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } + } + return SignedURL(b.name, object, newopts) +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. 
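The SignedURL path above, with its credential auto-detection, is one of the areas that has changed most since the previously vendored storage client. A small illustrative sketch, assuming application default credentials that can sign locally or reach the IAM Credentials API; names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Method and Expires are required; GoogleAccessID and the signing key
	// are detected from the client credentials when possible, as described above.
	url, err := client.Bucket("example-bucket").SignedURL("example-object", &storage.SignedURLOptions{
		Scheme:  storage.SigningSchemeV4,
		Method:  "GET",
		Expires: time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}
```

GenerateSignedPostPolicyV4, defined next, follows the same credential-detection flow for upload policies.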
+// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// +// This method requires the Expires field in the specified PostPolicyV4Options +// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields +// in some cases. Read more on the [automatic detection of credentials] for this method. +// +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing +func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + // Make a copy of opts so we don't modify the pointer parameter. + newopts := opts.clone() + + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return GenerateSignedPostPolicyV4(b.name, object, newopts) + } + + if newopts.GoogleAccessID == "" { + id, err := b.detectDefaultGoogleAccessID() + if err != nil { + return nil, err + } + newopts.GoogleAccessID = id + } + if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 { + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + PrivateKey string `json:"private_key"` + } + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err == nil && sa.PrivateKey != "" { + newopts.PrivateKey = []byte(sa.PrivateKey) + } + } + + // Don't error out if we can't unmarshal the private key from the client, + // fallback to the default sign function for the service account. + if len(newopts.PrivateKey) == 0 { + newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) + } + } + return GenerateSignedPostPolicyV4(b.name, object, newopts) +} + +func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { + returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)") + + if b.c.creds != nil && len(b.c.creds.JSON) > 0 { + var sa struct { + ClientEmail string `json:"client_email"` + SAImpersonationURL string `json:"service_account_impersonation_url"` + CredType string `json:"type"` + } + + err := json.Unmarshal(b.c.creds.JSON, &sa) + if err != nil { + returnErr = err + } else { + switch sa.CredType { + case "impersonated_service_account", "external_account": + start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") + + if end <= start { + returnErr = errors.New("error parsing external or impersonated service account credentials") + } else { + return sa.SAImpersonationURL[start+1 : end], nil + } + case "service_account": + if sa.ClientEmail != "" { + return sa.ClientEmail, nil + } + returnErr = errors.New("empty service account client email") + default: + returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported") + } + } + } + + // Don't error out if we can't unmarshal, fallback to GCE check. + if metadata.OnGCE() { + email, err := metadata.Email("default") + if err == nil && email != "" { + return email, nil + } else if err != nil { + returnErr = err + } else { + returnErr = errors.New("empty email from GCE metadata service") + } + + } + return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. 
Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr) +} + +func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { + return func(in []byte) ([]byte, error) { + ctx := context.Background() + + // It's ok to recreate this service per call since we pass in the http client, + // circumventing the cost of recreating the auth/transport layer + svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) + if err != nil { + return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) + } + + resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ + Payload: base64.StdEncoding.EncodeToString(in), + }).Do() + if err != nil { + return nil, fmt.Errorf("unable to sign bytes: %w", err) + } + out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode response: %w", err) + } + return out, nil + } +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +// Read-only fields are ignored by BucketHandle.Create. +type BucketAttrs struct { + // Name is the name of the bucket. + // This field is read-only. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. + BucketPolicyOnly BucketPolicyOnly + + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess UniformBucketLevelAccess + + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. It defaults to false. + DefaultEventBasedHold bool + + // If not empty, applies a predefined set of access controls. It should be set + // only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // It should be set only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedDefaultObjectACL string + + // Location is the location of the bucket. It defaults to "US". + // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. 
+ Location string + + // The bucket's custom placement configuration that holds a list of + // regional locations for custom dual regions. + CustomPlacementConfig *CustomPlacementConfig + + // MetaGeneration is the metadata generation of the bucket. + // This field is read-only. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. + StorageClass string + + // Created is the creation time of the bucket. + // This field is read-only. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + VersioningEnabled bool + + // Labels are the bucket's labels. + Labels map[string]string + + // RequesterPays reports whether the bucket is a Requester Pays bucket. + // Clients performing operations on Requester Pays buckets must provide + // a user project (see BucketHandle.UserProject), which will be billed + // for the operations. + RequesterPays bool + + // Lifecycle is the lifecycle configuration for objects in the bucket. + Lifecycle Lifecycle + + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket. A RetentionPolicy of nil implies the bucket + // has no minimum data retention. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // The bucket's Cross-Origin Resource Sharing (CORS) configuration. + CORS []CORS + + // The encryption configuration used by default for newly inserted objects. + Encryption *BucketEncryption + + // The logging configuration. + Logging *BucketLogging + + // The website configuration. + Website *BucketWebsite + + // Etag is the HTTP/1.1 Entity tag for the bucket. + // This field is read-only. + Etag string + + // LocationType describes how data is stored and replicated. + // Typical values are "multi-region", "region" and "dual-region". + // This field is read-only. + LocationType string + + // The project number of the project the bucket belongs to. + // This field is read-only. + ProjectNumber uint64 + + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // Autoclass holds the bucket's autoclass configuration. If enabled, + // allows for the automatic selection of the best storage class + // based on object access patterns. + Autoclass *Autoclass + + // ObjectRetentionMode reports whether individual objects in the bucket can + // be configured with a retention policy. An empty value means that object + // retention is disabled. + // This field is read-only. Object retention can be enabled only by creating + // a bucket with SetObjectRetention set to true on the BucketHandle. It + // cannot be modified once the bucket is created. + // ObjectRetention cannot be configured or reported through the gRPC API. 
+ ObjectRetentionMode string + + // SoftDeletePolicy contains the bucket's soft delete policy, which defines + // the period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. By default, new buckets will be created with a + // 7 day retention duration. In order to fully disable soft delete, you need + // to set a policy with a RetentionDuration of 0. + SoftDeletePolicy *SoftDeletePolicy + + // HierarchicalNamespace contains the bucket's hierarchical namespace + // configuration. Hierarchical namespace enabled buckets can contain + // [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources. + // It cannot be modified after bucket creation time. + // UniformBucketLevelAccess must also also be enabled on the bucket. + HierarchicalNamespace *HierarchicalNamespace +} + +// BucketPolicyOnly is an alias for UniformBucketLevelAccess. +// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly. +type BucketPolicyOnly struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + +// UniformBucketLevelAccess configures access checks to use only bucket-level IAM +// policies. +type UniformBucketLevelAccess struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + +// PublicAccessPrevention configures the Public Access Prevention feature, which +// can be used to disallow public access to any data in a bucket. See +// https://cloud.google.com/storage/docs/public-access-prevention for more +// information. +type PublicAccessPrevention int + +const ( + // PublicAccessPreventionUnknown is a zero value, used only if this field is + // not set in a call to GCS. + PublicAccessPreventionUnknown PublicAccessPrevention = iota + + // PublicAccessPreventionUnspecified corresponds to a value of "unspecified". + // Deprecated: use PublicAccessPreventionInherited + PublicAccessPreventionUnspecified + + // PublicAccessPreventionEnforced corresponds to a value of "enforced". This + // enforces Public Access Prevention on the bucket. + PublicAccessPreventionEnforced + + // PublicAccessPreventionInherited corresponds to a value of "inherited" + // and is the default for buckets. + PublicAccessPreventionInherited + + publicAccessPreventionUnknown string = "" + // TODO: remove unspecified when change is fully completed + publicAccessPreventionUnspecified = "unspecified" + publicAccessPreventionEnforced = "enforced" + publicAccessPreventionInherited = "inherited" +) + +func (p PublicAccessPrevention) String() string { + switch p { + case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified: + return publicAccessPreventionInherited + case PublicAccessPreventionEnforced: + return publicAccessPreventionEnforced + default: + return publicAccessPreventionUnknown + } +} + +// Lifecycle is the lifecycle configuration for objects in the bucket. +type Lifecycle struct { + Rules []LifecycleRule +} + +// RetentionPolicy enforces a minimum retention time for all objects +// contained in the bucket. +// +// Any attempt to overwrite or delete objects younger than the retention +// period will result in an error. 
An unlocked retention policy can be +// modified or removed from the bucket via the Update method. A +// locked retention policy cannot be removed or shortened in duration +// for the lifetime of the bucket. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +type RetentionPolicy struct { + // RetentionPeriod specifies the duration that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod time.Duration + + // EffectiveTime is the time from which the policy was enforced and + // effective. This field is read-only. + EffectiveTime time.Time + + // IsLocked describes whether the bucket is locked. Once locked, an + // object retention policy cannot be modified. + // This field is read-only. + IsLocked bool +} + +const ( + // RFC3339 timestamp with only the date segment, used for CreatedBefore, + // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. + rfc3339Date = "2006-01-02" + + // DeleteAction is a lifecycle action that deletes a live and/or archived + // objects. Takes precedence over SetStorageClass actions. + DeleteAction = "Delete" + + // SetStorageClassAction changes the storage class of live and/or archived + // objects. + SetStorageClassAction = "SetStorageClass" + + // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete + // multipart upload when the multipart upload meets the conditions specified + // in the lifecycle rule. The AgeInDays condition is the only allowed + // condition for this action. AgeInDays is measured from the time the + // multipart upload was created. + AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" +) + +// LifecycleRule is a lifecycle configuration rule. +// +// When all the configured conditions are met by an object in the bucket, the +// configured action will automatically be taken on that object. +type LifecycleRule struct { + // Action is the action to take when all of the associated conditions are + // met. + Action LifecycleAction + + // Condition is the set of conditions that must be met for the associated + // action to be taken. + Condition LifecycleCondition +} + +// LifecycleAction is a lifecycle configuration action. +type LifecycleAction struct { + // Type is the type of action to take on matching objects. + // + // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, + // and storage.AbortIncompleteMPUAction. + Type string + + // StorageClass is the storage class to set on matching objects if the Action + // is "SetStorageClass". + StorageClass string +} + +// Liveness specifies whether the object is live or not. +type Liveness int + +const ( + // LiveAndArchived includes both live and archived objects. + LiveAndArchived Liveness = iota + // Live specifies that the object is still live. + Live + // Archived specifies that the object is archived. + Archived +) + +// LifecycleCondition is a set of conditions used to match objects and take an +// action automatically. +// +// All configured conditions must be met for the associated action to be taken. +type LifecycleCondition struct { + // AllObjects is used to select all objects in a bucket by + // setting AgeInDays to 0. 
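To make the lifecycle types above concrete, here is an illustrative bucket creation that enables uniform bucket-level access and attaches a single delete-after-30-days rule. The names are placeholders and the snippet is a sketch, not part of the patch.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	attrs := &storage.BucketAttrs{
		Location:                 "US",
		StorageClass:             "STANDARD",
		UniformBucketLevelAccess: storage.UniformBucketLevelAccess{Enabled: true},
		Lifecycle: storage.Lifecycle{
			Rules: []storage.LifecycleRule{{
				// Delete objects once they are older than 30 days.
				Action:    storage.LifecycleAction{Type: storage.DeleteAction},
				Condition: storage.LifecycleCondition{AgeInDays: 30},
			}},
		},
	}
	if err := client.Bucket("example-bucket").Create(ctx, "example-project", attrs); err != nil {
		log.Fatal(err)
	}
}
```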
+ AllObjects bool + + // AgeInDays is the age of the object in days. + // If you want to set AgeInDays to `0` use AllObjects set to `true`. + AgeInDays int64 + + // CreatedBefore is the time the object was created. + // + // This condition is satisfied when an object is created before midnight of + // the specified date in UTC. + CreatedBefore time.Time + + // CustomTimeBefore is the CustomTime metadata field of the object. This + // condition is satisfied when an object's CustomTime timestamp is before + // midnight of the specified date in UTC. + // + // This condition can only be satisfied if CustomTime has been set. + CustomTimeBefore time.Time + + // DaysSinceCustomTime is the days elapsed since the CustomTime date of the + // object. This condition can only be satisfied if CustomTime has been set. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceCustomTime int64 + + // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp + // of the object. This condition is relevant only for versioned objects. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + DaysSinceNoncurrentTime int64 + + // Liveness specifies the object's liveness. Relevant only for versioned objects + Liveness Liveness + + // MatchesPrefix is the condition matching an object if any of the + // matches_prefix strings are an exact prefix of the object's name. + MatchesPrefix []string + + // MatchesStorageClasses is the condition matching the object's storage + // class. + // + // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". + MatchesStorageClasses []string + + // MatchesSuffix is the condition matching an object if any of the + // matches_suffix strings are an exact suffix of the object's name. + MatchesSuffix []string + + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This + // condition is satisfied when an object's noncurrent timestamp is before + // midnight of the specified date in UTC. + // + // This condition is relevant only for versioned objects. + NoncurrentTimeBefore time.Time + + // NumNewerVersions is the condition matching objects with a number of newer versions. + // + // If the value is N, this condition is satisfied when there are at least N + // versions (including the live version) newer than this version of the + // object. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. + NumNewerVersions int64 +} + +// BucketLogging holds the bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // The destination bucket where the current bucket's logs + // should be placed. + LogBucket string + + // A prefix for log object names. + LogObjectPrefix string +} + +// BucketWebsite holds the bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See +// https://cloud.google.com/storage/docs/static-website for more information. +type BucketWebsite struct { + // If the requested object path is missing, the service will ensure the path has + // a trailing '/', append this suffix, and attempt to retrieve the resulting + // object. This allows the creation of index.html objects to represent directory + // pages. 
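LifecycleCondition also supports version-aware conditions. A standalone helper sketch (not part of the patch) that builds a SetStorageClass rule for archived object versions, with the storage class and version count chosen only for the example:

```go
package example

import "cloud.google.com/go/storage"

// archiveNoncurrentVersions builds a rule that moves archived (noncurrent)
// object versions to ARCHIVE once at least two newer versions exist.
func archiveNoncurrentVersions() storage.LifecycleRule {
	return storage.LifecycleRule{
		Action: storage.LifecycleAction{
			Type:         storage.SetStorageClassAction,
			StorageClass: "ARCHIVE",
		},
		Condition: storage.LifecycleCondition{
			Liveness:         storage.Archived,
			NumNewerVersions: 2,
		},
	}
}
```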
+ MainPageSuffix string + + // If the requested object path is missing, and any mainPageSuffix object is + // missing, if applicable, the service will return the named object from this + // bucket as the content for a 404 Not Found result. + NotFoundPage string +} + +// CustomPlacementConfig holds the bucket's custom placement +// configuration for Custom Dual Regions. See +// https://cloud.google.com/storage/docs/locations#location-dr for more information. +type CustomPlacementConfig struct { + // The list of regional locations in which data is placed. + // Custom Dual Regions require exactly 2 regional locations. + DataLocations []string +} + +// Autoclass holds the bucket's autoclass configuration. If enabled, +// allows for the automatic selection of the best storage class +// based on object access patterns. See +// https://cloud.google.com/storage/docs/using-autoclass for more information. +type Autoclass struct { + // Enabled specifies whether the autoclass feature is enabled + // on the bucket. + Enabled bool + // ToggleTime is the time from which Autoclass was last toggled. + // If Autoclass is enabled when the bucket is created, the ToggleTime + // is set to the bucket creation time. This field is read-only. + ToggleTime time.Time + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + // To modify TerminalStorageClass, Enabled must be set to true. + TerminalStorageClass string + // TerminalStorageClassUpdateTime represents the time of the most recent + // update to "TerminalStorageClass". + TerminalStorageClassUpdateTime time.Time +} + +// SoftDeletePolicy contains the bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. +type SoftDeletePolicy struct { + // EffectiveTime indicates the time from which the policy, or one with a + // greater retention, was effective. This field is read-only. + EffectiveTime time.Time + + // RetentionDuration is the amount of time that soft-deleted objects in the + // bucket will be retained and cannot be permanently deleted. + RetentionDuration time.Duration +} + +// HierarchicalNamespace contains the bucket's hierarchical namespace +// configuration. Hierarchical namespace enabled buckets can contain +// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources. +type HierarchicalNamespace struct { + // Enabled indicates whether hierarchical namespace features are enabled on + // the bucket. This can only be set at bucket creation time currently. 
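Autoclass and SoftDeletePolicy are new knobs relative to the storage client we previously vendored. An illustrative creation-time configuration; the names and durations are placeholders chosen for the example only.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	attrs := &storage.BucketAttrs{
		// Let the service pick storage classes automatically, ending at ARCHIVE.
		Autoclass: &storage.Autoclass{Enabled: true, TerminalStorageClass: "ARCHIVE"},
		// Retain soft-deleted objects for 14 days instead of the 7-day default.
		SoftDeletePolicy: &storage.SoftDeletePolicy{RetentionDuration: 14 * 24 * time.Hour},
	}
	if err := client.Bucket("example-bucket").Create(ctx, "example-project", attrs); err != nil {
		log.Fatal(err)
	}
}
```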
+ Enabled bool +} + +func newBucket(b *raw.Bucket) (*BucketAttrs, error) { + if b == nil { + return nil, nil + } + rp, err := toRetentionPolicy(b.RetentionPolicy) + if err != nil { + return nil, err + } + + return &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + DefaultEventBasedHold: b.DefaultEventBasedHold, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + ACL: toBucketACLRules(b.Acl), + DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + ObjectRetentionMode: toBucketObjectRetention(b.ObjectRetention), + CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), + Logging: toBucketLogging(b.Logging), + Website: toBucketWebsite(b.Website), + BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), + UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), + PublicAccessPrevention: toPublicAccessPrevention(b.IamConfiguration), + Etag: b.Etag, + LocationType: b.LocationType, + ProjectNumber: b.ProjectNumber, + RPO: toRPO(b), + CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), + Autoclass: toAutoclassFromRaw(b.Autoclass), + SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace), + }, nil +} + +func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { + if b == nil { + return nil + } + return &BucketAttrs{ + Name: parseBucketName(b.GetName()), + Location: b.GetLocation(), + MetaGeneration: b.GetMetageneration(), + DefaultEventBasedHold: b.GetDefaultEventBasedHold(), + StorageClass: b.GetStorageClass(), + Created: b.GetCreateTime().AsTime(), + VersioningEnabled: b.GetVersioning().GetEnabled(), + ACL: toBucketACLRulesFromProto(b.GetAcl()), + DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), + Labels: b.GetLabels(), + RequesterPays: b.GetBilling().GetRequesterPays(), + Lifecycle: toLifecycleFromProto(b.GetLifecycle()), + RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()), + CORS: toCORSFromProto(b.GetCors()), + Encryption: toBucketEncryptionFromProto(b.GetEncryption()), + Logging: toBucketLoggingFromProto(b.GetLogging()), + Website: toBucketWebsiteFromProto(b.GetWebsite()), + BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()), + UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()), + PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), + LocationType: b.GetLocationType(), + RPO: toRPOFromProto(b), + CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), + ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based + Autoclass: toAutoclassFromProto(b.GetAutoclass()), + SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy), + HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace), + } +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + // Ignore VersioningEnabled if it is false. 
This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. + var v *raw.BucketVersioning + if b.VersioningEnabled { + v = &raw.BucketVersioning{Enabled: true} + } + var bb *raw.BucketBilling + if b.RequesterPays { + bb = &raw.BucketBilling{RequesterPays: true} + } + var bktIAM *raw.BucketIamConfiguration + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &raw.BucketIamConfiguration{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: true, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() + } + } + return &raw.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), + Autoclass: b.Autoclass.toRawAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(), + } +} + +func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { + if b == nil { + return &storagepb.Bucket{} + } + + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. 
+ var v *storagepb.Bucket_Versioning + if b.VersioningEnabled { + v = &storagepb.Bucket_Versioning{Enabled: true} + } + var bb *storagepb.Bucket_Billing + if b.RequesterPays { + bb = &storagepb.Bucket_Billing{RequesterPays: true} + } + var bktIAM *storagepb.Bucket_IamConfig + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: true, + } + } + if b.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() + } + } + + return &storagepb.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toProtoBucketACL(b.ACL), + DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toProtoLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(b.CORS), + Encryption: b.Encryption.toProtoBucketEncryption(), + Logging: b.Logging.toProtoBucketLogging(), + Website: b.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), + Autoclass: b.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(), + HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(), + } +} + +func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { + if ua == nil { + return &storagepb.Bucket{} + } + + var v *storagepb.Bucket_Versioning + if ua.VersioningEnabled != nil { + v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} + } + var bb *storagepb.Bucket_Billing + if ua.RequesterPays != nil { + bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} + } + + var bktIAM *storagepb.Bucket_IamConfig + if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM = &storagepb.Bucket_IamConfig{} + + if ua.BucketPolicyOnly != nil { + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), + } + } + + if ua.UniformBucketLevelAccess != nil { + // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, + // so Enabled will be overriden here if both are set + bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ + Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), + } + } + + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } + } + + var defaultHold bool + if ua.DefaultEventBasedHold != nil { + defaultHold = optional.ToBool(ua.DefaultEventBasedHold) + } + var lifecycle Lifecycle + if ua.Lifecycle != nil { + lifecycle = *ua.Lifecycle + } + var bktACL []*storagepb.BucketAccessControl + if ua.acl != nil { + bktACL = toProtoBucketACL(ua.acl) + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. 
+ bktACL = nil + } + var bktDefaultObjectACL []*storagepb.ObjectAccessControl + if ua.defaultObjectACL != nil { + bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL) + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. + bktDefaultObjectACL = nil + } + + return &storagepb.Bucket{ + StorageClass: ua.StorageClass, + Acl: bktACL, + DefaultObjectAcl: bktDefaultObjectACL, + DefaultEventBasedHold: defaultHold, + Versioning: v, + Billing: bb, + Lifecycle: toProtoLifecycle(lifecycle), + RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(ua.CORS), + Encryption: ua.Encryption.toProtoBucketEncryption(), + Logging: ua.Logging.toProtoBucketLogging(), + Website: ua.Website.toProtoBucketWebsite(), + IamConfig: bktIAM, + Rpo: ua.RPO.String(), + Autoclass: ua.Autoclass.toProtoAutoclass(), + SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(), + Labels: ua.setLabels, + } +} + +// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. +type CORS struct { + // MaxAge is the value to return in the Access-Control-Max-Age + // header used in preflight responses. + MaxAge time.Duration + + // Methods is the list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Methods []string + + // Origins is the list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origins []string + + // ResponseHeaders is the list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeaders []string +} + +// BucketEncryption is a bucket's encryption configuration. +type BucketEncryption struct { + // A Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt + // objects inserted into this bucket, if no encryption method is specified. + // The key's location must be the same as the bucket's. + DefaultKMSKeyName string +} + +// BucketAttrsToUpdate define the attributes to update during an Update call. +type BucketAttrsToUpdate struct { + // If set, updates whether the bucket uses versioning. + VersioningEnabled optional.Bool + + // If set, updates whether the bucket is a Requester Pays bucket. + RequesterPays optional.Bool + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. + DefaultEventBasedHold optional.Bool + + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and + // UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess + // will take precedence. + BucketPolicyOnly *BucketPolicyOnly + + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess *UniformBucketLevelAccess + + // PublicAccessPrevention is the setting for the bucket's + // PublicAccessPrevention policy, which can be used to prevent public access + // of data in the bucket. 
See + // https://cloud.google.com/storage/docs/public-access-prevention for more + // information. + PublicAccessPrevention PublicAccessPrevention + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. + StorageClass string + + // If set, updates the retention policy of the bucket. Using + // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // If set, replaces the CORS configuration with a new configuration. + // An empty (rather than nil) slice causes all CORS policies to be removed. + CORS []CORS + + // If set, replaces the encryption configuration of the bucket. Using + // BucketEncryption.DefaultKMSKeyName = "" will delete the existing + // configuration. + Encryption *BucketEncryption + + // If set, replaces the lifecycle configuration of the bucket. + Lifecycle *Lifecycle + + // If set, replaces the logging configuration of the bucket. + Logging *BucketLogging + + // If set, replaces the website configuration of the bucket. + Website *BucketWebsite + + // If not empty, applies a predefined set of access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedDefaultObjectACL string + + // RPO configures the Recovery Point Objective (RPO) policy of the bucket. + // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. + // See https://cloud.google.com/storage/docs/managing-turbo-replication for + // more information. + RPO RPO + + // If set, updates the autoclass configuration of the bucket. + // To disable autoclass on the bucket, set to an empty &Autoclass{}. + // To update the configuration for Autoclass.TerminalStorageClass, + // Autoclass.Enabled must also be set to true. + // See https://cloud.google.com/storage/docs/using-autoclass for more information. + Autoclass *Autoclass + + // If set, updates the soft delete policy of the bucket. + SoftDeletePolicy *SoftDeletePolicy + + // acl is the list of access control rules on the bucket. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + acl []ACLRule + + // defaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + // It is unexported and only used internally by the gRPC client. + // Library users should use ACLHandle methods directly. + defaultObjectACL []ACLRule + + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified when ua is used +// in a call to Bucket.Update. +func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { + if ua.setLabels == nil { + ua.setLabels = map[string]string{} + } + ua.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted when ua is used in a +// call to Bucket.Update. 
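BucketAttrsToUpdate is applied through BucketHandle.Update. A hedged sketch showing labels and CORS being updated in one call; the label keys, origin, and bucket name are placeholders and the snippet is not part of the patch.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	uattrs := storage.BucketAttrsToUpdate{
		RequesterPays: true,
		CORS: []storage.CORS{{
			MaxAge:          time.Hour,
			Methods:         []string{"GET"},
			Origins:         []string{"https://example.com"},
			ResponseHeaders: []string{"Content-Type"},
		}},
	}
	uattrs.SetLabel("team", "autograph")   // add or modify a label (placeholder values)
	uattrs.DeleteLabel("deprecated-label") // remove a label (placeholder key)

	if _, err := client.Bucket("example-bucket").Update(ctx, uattrs); err != nil {
		log.Fatal(err)
	}
}
```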
+func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.DefaultEventBasedHold != nil { + rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.BucketPolicyOnly != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.BucketPolicyOnly.Enabled, + ForceSendFields: []string{"Enabled"}, + }, + } + } + if ua.UniformBucketLevelAccess != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.UniformBucketLevelAccess.Enabled, + ForceSendFields: []string{"Enabled"}, + }, + } + } + if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { + if rb.IamConfiguration == nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{} + } + rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String() + } + if ua.Encryption != nil { + if ua.Encryption.DefaultKMSKeyName == "" { + rb.NullFields = append(rb.NullFields, "Encryption") + rb.Encryption = nil + } else { + rb.Encryption = ua.Encryption.toRawBucketEncryption() + } + } + if ua.Lifecycle != nil { + rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") + } + if ua.Logging != nil { + if *ua.Logging == (BucketLogging{}) { + rb.NullFields = append(rb.NullFields, "Logging") + rb.Logging = nil + } else { + rb.Logging = ua.Logging.toRawBucketLogging() + } + } + if ua.Website != nil { + if *ua.Website == (BucketWebsite{}) { + rb.NullFields = append(rb.NullFields, "Website") + rb.Website = nil + } else { + rb.Website = ua.Website.toRawBucketWebsite() + } + } + if ua.Autoclass != nil { + rb.Autoclass = &raw.BucketAutoclass{ + Enabled: ua.Autoclass.Enabled, + TerminalStorageClass: ua.Autoclass.TerminalStorageClass, + ForceSendFields: []string{"Enabled"}, + } + rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") + } + if ua.SoftDeletePolicy != nil { + if ua.SoftDeletePolicy.RetentionDuration == 0 { + rb.NullFields = append(rb.NullFields, "SoftDeletePolicy") + rb.SoftDeletePolicy = nil + } else { + rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy() + } + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + rb.Acl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "Acl") + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. 
+ rb.DefaultObjectAcl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") + } + + rb.StorageClass = ua.StorageClass + rb.Rpo = ua.RPO.String() + + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. The supplied +// BucketConditions must have exactly one field set to a non-zero value; +// otherwise an error will be returned from any operation on the BucketHandle. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) +} + +// SetObjectRetention returns a new BucketHandle that will enable object retention +// on bucket creation. 
To enable object retention, you must use the returned +// handle to create the bucket. This has no effect on an already existing bucket. +// ObjectRetention is not enabled by default. +// ObjectRetention cannot be configured through the gRPC API. +func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle { + b2 := *b + b2.enableObjectRetention = &enable + return &b2 +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. +func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + cval := reflect.ValueOf(call) + switch { + case conds.MetagenerationMatch != 0: + if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +// applyBucketConds modifies the provided request message using the conditions +// in conds. msg is a protobuf Message that has fields if_metageneration_match +// and if_metageneration_not_match. +func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { + if rp == nil { + return nil + } + return &raw.BucketRetentionPolicy{ + RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + } +} + +func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy { + if rp == nil { + return nil + } + // RetentionPeriod must be greater than 0, so if it is 0, the user left it + // unset, and so we should not send it in the request i.e. nil is sent. 
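BucketConditions and applyBucketConds implement optimistic concurrency on bucket metadata. An illustrative read-modify-write guarded by a metageneration precondition; the bucket name is a placeholder.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket")
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Only apply the update if the bucket metadata has not changed since
	// Attrs was read; otherwise the precondition fails and an error is returned.
	_, err = bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
		Update(ctx, storage.BucketAttrsToUpdate{VersioningEnabled: true})
	if err != nil {
		log.Fatal(err)
	}
}
```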
+ var dur *durationpb.Duration + if rp.RetentionPeriod != 0 { + dur = durationpb.New(rp.RetentionPeriod) + } + return &storagepb.Bucket_RetentionPolicy{ + RetentionDuration: dur, + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil || rp.EffectiveTime == "" { + return nil, nil + } + t, err := time.Parse(time.RFC3339, rp.EffectiveTime) + if err != nil { + return nil, err + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, + EffectiveTime: t, + IsLocked: rp.IsLocked, + }, nil +} + +func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { + if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { + return nil + } + return &RetentionPolicy{ + RetentionPeriod: rp.GetRetentionDuration().AsDuration(), + EffectiveTime: rp.GetEffectiveTime().AsTime(), + IsLocked: rp.GetIsLocked(), + } +} + +func toBucketObjectRetention(or *raw.BucketObjectRetention) string { + if or == nil { + return "" + } + return or.Mode +} + +func toRawCORS(c []CORS) []*raw.BucketCors { + var out []*raw.BucketCors + for _, v := range c { + out = append(out, &raw.BucketCors{ + MaxAgeSeconds: int64(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors { + var out []*storagepb.Bucket_Cors + for _, v := range c { + out = append(out, &storagepb.Bucket_Cors{ + MaxAgeSeconds: int32(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toCORS(rc []*raw.BucketCors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, + Methods: v.Method, + Origins: v.Origin, + ResponseHeaders: v.ResponseHeader, + }) + } + return out +} + +func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second, + Methods: v.GetMethod(), + Origins: v.GetOrigin(), + ResponseHeaders: v.GetResponseHeader(), + }) + } + return out +} + +func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { + var rl raw.BucketLifecycle + if len(l.Rules) == 0 { + rl.ForceSendFields = []string{"Rule"} + } + for _, r := range l.Rules { + rr := &raw.BucketLifecycleRule{ + Action: &raw.BucketLifecycleRuleAction{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: r.Condition.NumNewerVersions, + }, + } + + // AllObjects takes precedent when both AllObjects and AgeInDays are set + // Rationale: If you've opted into using AllObjects, it makes sense that you + // understand the implications of how this option works with AgeInDays. 
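+		// NOTE(editor): illustrative example, not from the upstream source: a
+		// rule with AllObjects: true and AgeInDays: 30 is serialized with an
+		// explicit Age of 0 (ForceSendFields ensures the zero value is sent)
+		// and the AgeInDays value is ignored.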
+ if r.Condition.AllObjects { + rr.Condition.Age = googleapi.Int64(0) + rr.Condition.ForceSendFields = []string{"Age"} + } else if r.Condition.AgeInDays > 0 { + rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = googleapi.Bool(true) + case Archived: + rr.Condition.IsLive = googleapi.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) + } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { + var rl storagepb.Bucket_Lifecycle + + for _, r := range l.Rules { + rr := &storagepb.Bucket_Lifecycle_Rule{ + Action: &storagepb.Bucket_Lifecycle_Rule_Action{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{ + // Note: The Apiary types use int64 (even though the Discovery + // doc states "format: int32"), so the client types used int64, + // but the proto uses int32 so we have a potentially lossy + // conversion. + DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), + DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), + MatchesPrefix: r.Condition.MatchesPrefix, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, + NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), + }, + } + + // Only set AgeDays in the proto if it is non-zero, or if the user has set + // Condition.AllObjects. 
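+		// NOTE(editor): illustrative example, not from the upstream source:
+		// AgeInDays: 30 becomes AgeDays: proto.Int32(30); AllObjects: true
+		// overrides it to proto.Int32(0); if neither is set, AgeDays stays nil
+		// and is omitted from the proto.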
+ if r.Condition.AgeInDays != 0 { + rr.Condition.AgeDays = proto.Int32(int32(r.Condition.AgeInDays)) + } + if r.Condition.AllObjects { + rr.Condition.AgeDays = proto.Int32(0) + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = proto.Bool(true) + case Archived: + rr.Condition.IsLive = proto.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) + } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.Rule { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.Action.Type, + StorageClass: rr.Action.StorageClass, + }, + Condition: LifecycleCondition{ + DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: rr.Condition.MatchesPrefix, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + MatchesSuffix: rr.Condition.MatchesSuffix, + NumNewerVersions: rr.Condition.NumNewerVersions, + }, + } + if rr.Condition.Age != nil { + r.Condition.AgeInDays = *rr.Condition.Age + if *rr.Condition.Age == 0 { + r.Condition.AllObjects = true + } + } + + if rr.Condition.IsLive == nil { + r.Condition.Liveness = LiveAndArchived + } else if *rr.Condition.IsLive { + r.Condition.Liveness = Live + } else { + r.Condition.Liveness = Archived + } + + if rr.Condition.CreatedBefore != "" { + r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) + } + if rr.Condition.CustomTimeBefore != "" { + r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) + } + if rr.Condition.NoncurrentTimeBefore != "" { + r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) + } + l.Rules = append(l.Rules, r) + } + return l +} + +func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.GetRule() { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.GetAction().GetType(), + StorageClass: rr.GetAction().GetStorageClass(), + }, + Condition: LifecycleCondition{ + AgeInDays: int64(rr.GetCondition().GetAgeDays()), + DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), + DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), + MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), + MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), + MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), + NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), + }, + } + + // Only set Condition.AllObjects if AgeDays is zero, not if it is nil. 
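+		// NOTE(editor): illustrative example, not from the upstream source: an
+		// explicit AgeDays of 0 returned by the service maps to AllObjects =
+		// true, while an unset (nil) AgeDays leaves AllObjects false.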
+ if rr.GetCondition().AgeDays != nil && rr.GetCondition().GetAgeDays() == 0 { + r.Condition.AllObjects = true + } + + if rr.GetCondition().IsLive == nil { + r.Condition.Liveness = LiveAndArchived + } else if rr.GetCondition().GetIsLive() { + r.Condition.Liveness = Live + } else { + r.Condition.Liveness = Archived + } + + if rr.GetCondition().GetCreatedBefore() != nil { + r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) + } + if rr.GetCondition().GetCustomTimeBefore() != nil { + r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) + } + if rr.GetCondition().GetNoncurrentTimeBefore() != nil { + r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) + } + l.Rules = append(l.Rules, r) + } + return l +} + +func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { + if e == nil { + return nil + } + return &raw.BucketEncryption{ + DefaultKmsKeyName: e.DefaultKMSKeyName, + } +} + +func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption { + if e == nil { + return nil + } + return &storagepb.Bucket_Encryption{ + DefaultKmsKey: e.DefaultKMSKeyName, + } +} + +func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} +} + +func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()} +} + +func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { + if b == nil { + return nil + } + return &raw.BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { + if b == nil { + return nil + } + return &storagepb.Bucket_Logging{ + LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func toBucketLogging(b *raw.BucketLogging) *BucketLogging { + if b == nil { + return nil + } + return &BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { + if b == nil { + return nil + } + lb := parseBucketName(b.GetLogBucket()) + return &BucketLogging{ + LogBucket: lb, + LogObjectPrefix: b.GetLogObjectPrefix(), + } +} + +func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { + if w == nil { + return nil + } + return &raw.BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website { + if w == nil { + return nil + } + return &storagepb.Bucket_Website{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.GetMainPageSuffix(), + NotFoundPage: w.GetNotFoundPage(), + } +} + +func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { + if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { + return BucketPolicyOnly{} + } + lt, err := 
time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) + if err != nil { + return BucketPolicyOnly{ + Enabled: true, + } + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: lt, + } +} + +func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return BucketPolicyOnly{} + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + +func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { + if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { + return UniformBucketLevelAccess{} + } + lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime) + if err != nil { + return UniformBucketLevelAccess{ + Enabled: true, + } + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: lt, + } +} + +func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess { + if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { + return UniformBucketLevelAccess{} + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), + } +} + +func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.PublicAccessPrevention { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention { + if b == nil { + return PublicAccessPreventionUnknown + } + switch b.GetPublicAccessPrevention() { + case publicAccessPreventionInherited, publicAccessPreventionUnspecified: + return PublicAccessPreventionInherited + case publicAccessPreventionEnforced: + return PublicAccessPreventionEnforced + default: + return PublicAccessPreventionUnknown + } +} + +func toRPO(b *raw.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.Rpo { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func toRPOFromProto(b *storagepb.Bucket) RPO { + if b == nil { + return RPOUnknown + } + switch b.GetRpo() { + case rpoDefault: + return RPODefault + case rpoAsyncTurbo: + return RPOAsyncTurbo + default: + return RPOUnknown + } +} + +func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.DataLocations} +} + +func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { + if c == nil { + return nil + } + return &raw.BucketCustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { + if c == nil { + return nil + } + return &storagepb.Bucket_CustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} +} + +func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { + if a == nil { + return nil + 
} + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. + return &raw.BucketAutoclass{ + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, + } +} + +func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { + if a == nil { + return nil + } + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. + ba := &storagepb.Bucket_Autoclass{ + Enabled: a.Enabled, + } + if a.TerminalStorageClass != "" { + ba.TerminalStorageClass = &a.TerminalStorageClass + } + return ba +} + +func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { + if a == nil || a.ToggleTime == "" { + return nil + } + ac := &Autoclass{ + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, + } + // Return ToggleTime and TSCUpdateTime only if parsed with valid values. + t, err := time.Parse(time.RFC3339, a.ToggleTime) + if err == nil { + ac.ToggleTime = t + } + ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime) + if err == nil { + ac.TerminalStorageClassUpdateTime = ut + } + return ac +} + +func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { + if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { + return nil + } + return &Autoclass{ + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + TerminalStorageClass: a.GetTerminalStorageClass(), + TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(), + } +} + +func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + // ForceSendFields must be set to send a zero value for RetentionDuration and disable + // soft delete. + return &raw.BucketSoftDeletePolicy{ + RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()), + ForceSendFields: []string{"RetentionDurationSeconds"}, + } +} + +func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy { + if p == nil { + return nil + } + // Excluding read only field EffectiveTime. + return &storagepb.Bucket_SoftDeletePolicy{ + RetentionDuration: durationpb.New(p.RetentionDuration), + } +} + +func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + + policy := &SoftDeletePolicy{ + RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second, + } + + // Return EffectiveTime only if parsed to a valid value. 
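+	// NOTE(editor): if EffectiveTime is empty or not valid RFC 3339, the
+	// EffectiveTime field simply keeps its zero time.Time value.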
+ if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil { + policy.EffectiveTime = t + } + + return policy +} + +func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy { + if p == nil { + return nil + } + return &SoftDeletePolicy{ + EffectiveTime: p.GetEffectiveTime().AsTime(), + RetentionDuration: p.GetRetentionDuration().AsDuration(), + } +} + +func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace { + if hns == nil { + return nil + } + return &storagepb.Bucket_HierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace { + if hns == nil { + return nil + } + return &raw.BucketHierarchicalNamespace{ + Enabled: hns.Enabled, + } +} + +func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace { + if p == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: p.Enabled, + } +} + +func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace { + if r == nil { + return nil + } + return &HierarchicalNamespace{ + Enabled: r.Enabled, + } +} + +// Objects returns an iterator over the objects in the bucket that match the +// Query q. If q is nil, no filtering is done. Objects will be iterated over +// lexicographically by name. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.ListObjects(ctx, b.name, q, o...) +} + +// Retryer returns a bucket handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// Retry options set on a object handle will take precedence over options set on +// the bucket handle. +// These retry options will merge with the client's retry configuration (if set) +// for the returned handle. Options passed into this method will take precedence +// over retry options on the client. Note that you must explicitly pass in each +// option you want to override. +func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { + b2 := *b + var retry *retryConfig + if b.retry != nil { + // merge the options with the existing retry + retry = b.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) + } + b2.retry = retry + b2.acl.retry = retry + b2.defaultObjectACL.retry = retry + return &b2 +} + +// An ObjectIterator is an iterator over ObjectAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +type ObjectIterator struct { + ctx context.Context + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. 
+// +// In addition, if Next returns an error other than iterator.Done, all +// subsequent calls will return the same error. To continue iteration, a new +// `ObjectIterator` must be created. Since objects are ordered lexicographically +// by name, `Query.StartOffset` can be used to create a new iterator which will +// start at the desired place. See +// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. +func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + o := makeStorageOpts(true, c.retry, "") + return c.tc.ListBuckets(ctx, projectID, o...) +} + +// A BucketIterator is an iterator over BucketAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// RPO (Recovery Point Objective) configures the turbo replication feature. See +// https://cloud.google.com/storage/docs/managing-turbo-replication for more information. +type RPO int + +const ( + // RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO + // is not present in the bucket metadata, that is, the bucket is not dual-region. + // This value is also used if the RPO field is not set in a call to GCS. + RPOUnknown RPO = iota + + // RPODefault represents default replication. It is used to reset RPO on an + // existing bucket that has this field set to RPOAsyncTurbo. Otherwise it + // is equivalent to RPOUnknown, and is always ignored. This value is valid + // for dual- or multi-region buckets. + RPODefault + + // RPOAsyncTurbo represents turbo replication and is used to enable Turbo + // Replication on a bucket. This value is only valid for dual-region buckets. 
+ RPOAsyncTurbo + + rpoUnknown string = "" + rpoDefault = "DEFAULT" + rpoAsyncTurbo = "ASYNC_TURBO" +) + +func (rpo RPO) String() string { + switch rpo { + case RPODefault: + return rpoDefault + case RPOAsyncTurbo: + return rpoAsyncTurbo + default: + return rpoUnknown + } +} + +// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToUTCTime(d *dpb.Date) time.Time { + return protoDateToTime(d, time.UTC) +} + +// protoDateToTime returns a new Time based on the google.type.Date and provided +// *time.Location. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { + return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) +} + +// timeToProtoDate returns a new google.type.Date based on the provided time.Time. +// The location is ignored, as is anything more precise than the day. +func timeToProtoDate(t time.Time) *dpb.Date { + return &dpb.Date{ + Year: int32(t.Year()), + Month: int32(t.Month()), + Day: int32(t.Day()), + } +} diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go new file mode 100644 index 000000000..bbe89276a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/client.go @@ -0,0 +1,352 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "io" + "time" + + "cloud.google.com/go/iam/apiv1/iampb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" +) + +// TODO(noahdietz): Move existing factory methods to this file. + +// storageClient is an internal-only interface designed to separate the +// transport-specific logic of making Storage API calls from the logic of the +// client library. +// +// Implementation requirements beyond implementing the interface include: +// * factory method(s) must accept a `userProject string` param +// * `settings` must be retained per instance +// * `storageOption`s must be resolved in the order they are received +// * all API errors must be wrapped in the gax-go APIError type +// * any unimplemented interface methods must return a StorageUnimplementedErr +// +// TODO(noahdietz): This interface is currently not used in the production code +// paths +type storageClient interface { + + // Top-level methods. + + GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) + CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) + ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator + Close() error + + // Bucket methods. 
+ + DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) + LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error + ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator + + // Object metadata methods. + + DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error + GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) + UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) + RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) + + // Default Object ACL methods. + + DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Bucket ACL methods. + + DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error + ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) + UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Object ACL methods. + + DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error + ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) + UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error + + // Media operations. + + ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) + RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) + + NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) + OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) + + // IAM methods. + + GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) + SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error + TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) + + // HMAC Key methods. 
+
+	GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
+	ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
+	UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
+	CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
+	DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
+
+	// Notification methods.
+	ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
+	CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
+	DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
+}
+
+// settings contains transport-agnostic configuration for API calls made via
+// the storageClient interface. All implementations must use settings
+// and respect those that are applicable.
+type settings struct {
+	// retry is the complete retry configuration to use when evaluating if an
+	// API call should be retried.
+	retry *retryConfig
+
+	// gax is a set of gax.CallOption to be conveyed to gax.Invoke.
+	// Note: Not all storageClient interfaces will use gax.Invoke.
+	gax []gax.CallOption
+
+	// idempotent indicates if the call is idempotent or not when considering
+	// if the call should be retried or not.
+	idempotent bool
+
+	// clientOption is a set of option.ClientOption to be used during client
+	// transport initialization. See https://pkg.go.dev/google.golang.org/api/option
+	// for a list of supported options.
+	clientOption []option.ClientOption
+
+	// userProject is the user project that should be billed for the request.
+	userProject string
+}
+
+func initSettings(opts ...storageOption) *settings {
+	s := &settings{}
+	resolveOptions(s, opts...)
+	return s
+}
+
+func resolveOptions(s *settings, opts ...storageOption) {
+	for _, o := range opts {
+		o.Apply(s)
+	}
+}
+
+// callSettings is a helper for resolving storage options against the settings
+// in the context of an individual call. This is to ensure that client-level
+// default settings are not mutated by two different calls getting options.
+//
+// Example: s := callSettings(c.settings, opts...)
+func callSettings(defaults *settings, opts ...storageOption) *settings {
+	if defaults == nil {
+		return nil
+	}
+	// This does not make a deep copy of the pointer/slice fields, but all
+	// options replace the settings fields rather than modify their values in
+	// place.
+	cs := *defaults
+	resolveOptions(&cs, opts...)
+	return &cs
+}
+
+// makeStorageOpts is a helper for generating a set of storageOption based on
+// idempotency, retryConfig, and userProject. All top-level client operations
+// will generally have to pass these options through the interface.
+func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
+	opts := []storageOption{idempotent(isIdempotent)}
+	if retry != nil {
+		opts = append(opts, withRetryConfig(retry))
+	}
+	if userProject != "" {
+		opts = append(opts, withUserProject(userProject))
+	}
+	return opts
+}
+
+// storageOption is the transport-agnostic call option for the storageClient
+// interface.
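+//
+// NOTE(editor): illustrative example, not part of the upstream source. The
+// options returned by
+//
+//	makeStorageOpts(true, &retryConfig{}, "my-project")
+//
+// resolve, in order, into a settings value with idempotent == true, that
+// retry configuration, and userProject == "my-project".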
+type storageOption interface { + Apply(s *settings) +} + +func withRetryConfig(rc *retryConfig) storageOption { + return &retryOption{rc} +} + +type retryOption struct { + rc *retryConfig +} + +func (o *retryOption) Apply(s *settings) { s.retry = o.rc } + +func idempotent(i bool) storageOption { + return &idempotentOption{i} +} + +type idempotentOption struct { + idempotency bool +} + +func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency } + +func withClientOptions(opts ...option.ClientOption) storageOption { + return &clientOption{opts: opts} +} + +type clientOption struct { + opts []option.ClientOption +} + +func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts } + +func withUserProject(project string) storageOption { + return &userProjectOption{project} +} + +type userProjectOption struct { + project string +} + +func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } + +type openWriterParams struct { + // Writer configuration + + // ctx is the context used by the writer routine to make all network calls + // and to manage the writer routine - see `Writer.ctx`. + // Required. + ctx context.Context + // chunkSize - see `Writer.ChunkSize`. + // Optional. + chunkSize int + // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. + // Optional. + chunkRetryDeadline time.Duration + + // Object/request properties + + // bucket - see `Writer.o.bucket`. + // Required. + bucket string + // attrs - see `Writer.ObjectAttrs`. + // Required. + attrs *ObjectAttrs + // forceEmptyContentType - Disables auto-detect of Content-Type + // Optional. + forceEmptyContentType bool + // conds - see `Writer.o.conds`. + // Optional. + conds *Conditions + // encryptionKey - see `Writer.o.encryptionKey` + // Optional. + encryptionKey []byte + // sendCRC32C - see `Writer.SendCRC32C`. + // Optional. + sendCRC32C bool + + // Writer callbacks + + // donec - see `Writer.donec`. + // Required. + donec chan struct{} + // setError callback for reporting errors - see `Writer.error`. + // Required. + setError func(error) + // progress callback for reporting upload progress - see `Writer.progress`. + // Required. + progress func(int64) + // setObj callback for reporting the resulting object - see `Writer.obj`. + // Required. + setObj func(*ObjectAttrs) +} + +type newRangeReaderParams struct { + bucket string + conds *Conditions + encryptionKey []byte + gen int64 + length int64 + object string + offset int64 + readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. +} + +type getObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + softDeleted bool +} + +type updateObjectParams struct { + bucket, object string + uattrs *ObjectAttrsToUpdate + gen int64 + encryptionKey []byte + conds *Conditions + overrideRetention *bool +} + +type restoreObjectParams struct { + bucket, object string + gen int64 + encryptionKey []byte + conds *Conditions + copySourceACL bool +} + +type composeObjectRequest struct { + dstBucket string + dstObject destinationObject + srcs []sourceObject + predefinedACL string + sendCRC32C bool +} + +type sourceObject struct { + name string + bucket string + gen int64 + conds *Conditions + encryptionKey []byte +} + +type destinationObject struct { + name string + bucket string + conds *Conditions + attrs *ObjectAttrs // attrs to set on the destination object. 
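+	// NOTE(editor): encryptionKey below carries a customer-supplied encryption
+	// key and keyName a Cloud KMS key name for the destination; Copier.Run
+	// returns an error if both are set.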
+ encryptionKey []byte + keyName string +} + +type rewriteObjectRequest struct { + srcObject sourceObject + dstObject destinationObject + predefinedACL string + token string + maxBytesRewrittenPerCall int64 +} + +type rewriteObjectResponse struct { + resource *ObjectAttrs + done bool + written int64 + size int64 + token string +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 000000000..a0b9a2683 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,233 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + + "cloud.google.com/go/internal/trace" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(copiedBytes, totalBytes uint64) + + // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, + // that will be used to encrypt the object. Overrides the object's KMSKeyName, if + // any. + // + // Providing both a DestinationKMSKeyName and a customer-supplied encryption key + // (via ObjectHandle.Key) on the destination object will result in an error when + // Run is called. + DestinationKMSKeyName string + + dst, src *ObjectHandle + + // The maximum number of bytes that will be rewritten per rewrite request. 
+ // Most callers shouldn't need to specify this parameter - it is primarily + // in place to support testing. If specified the value must be an integral + // multiple of 1 MiB (1048576). Also, this only applies to requests where + // the source and destination span locations and/or storage classes. Finally, + // this value must not change across rewrite calls else you'll get an error + // that the `rewriteToken` is invalid. + maxBytesRewrittenPerCall int64 +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { + return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") + } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + req := &rewriteObjectRequest{ + srcObject: sourceObject{ + name: c.src.object, + bucket: c.src.bucket, + gen: c.src.gen, + conds: c.src.conds, + encryptionKey: c.src.encryptionKey, + }, + dstObject: destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, + keyName: c.DestinationKMSKeyName, + }, + predefinedACL: c.PredefinedACL, + token: c.RewriteToken, + maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, + } + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + var userProject string + if c.dst.userProject != "" { + userProject = c.dst.userProject + } else if c.src.userProject != "" { + userProject = c.src.userProject + } + opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) + + for { + res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) + if err != nil { + return nil, err + } + c.RewriteToken = res.token + req.token = res.token + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.written), uint64(res.size)) + } + if res.done { // Finished successfully. + return res.resource, nil + } + } +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. +// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // SendCRC specifies whether to transmit a CRC32C field. 
It should be set + // to true in addition to setting the Composer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data in the destination object does not match + // the checksum, the compose will be rejected. + SendCRC32C bool + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. +func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.dst.validate(); err != nil { + return nil, err + } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + } + + req := &composeObjectRequest{ + dstBucket: c.dst.bucket, + predefinedACL: c.PredefinedACL, + sendCRC32C: c.SendCRC32C, + } + req.dstObject = destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, + } + for _, src := range c.srcs { + s := sourceObject{ + name: src.object, + bucket: src.bucket, + gen: src.gen, + conds: src.conds, + } + req.srcs = append(req.srcs, s) + } + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) + return c.dst.c.tc.ComposeObject(ctx, req, opts...) +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 000000000..c274c762e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,379 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. + +# Creating a Client + +To start working with this package, create a [Client]: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +The client will use your default application credentials. 
Clients should be +reused instead of created as needed. The methods of [Client] are safe for +concurrent use by multiple goroutines. + +You may configure the client by passing in options from the [google.golang.org/api/option] +package. You may also use options defined in this package, such as [WithJSONReads]. + +If you only wish to access public data, you can create +an unauthenticated client with + + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST +environment variable to the address at which your emulator is running. This will +send requests to that address instead of to Cloud Storage. You can then create +and use a client as usual: + + // Set STORAGE_EMULATOR_HOST environment variable. + err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } + + // Create client as usual. + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + // This request is now directed to http://localhost:9000/storage/v1/b + // instead of https://storage.googleapis.com/storage/v1/b + if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Please note that there is no official emulator for Cloud Storage. + +# Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. To create a bucket in Google Cloud Storage, +call [BucketHandle.Create]: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set +the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use +[BucketHandle.Attrs]: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +# Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets, but unlike buckets +you don't explicitly create an object. Instead, the first time you write +to an object it will be created. You can use the standard Go [io.Reader] +and [io.Writer] interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. 
+ } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +# Listing objects + +Listing objects in a bucket is done with the [BucketHandle.Objects] method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } + +Objects are listed lexicographically by name. To filter objects +lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: + + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } + + // ... as before + +If only a subset of object attributes is needed when listing, specifying this +subset using [Query.SetAttrSelection] may speed up the listing process: + + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) + + // ... as before + +# ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see [Cloud Storage IAM docs]. + +To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +# Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. [Conditions] let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +# Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +Signing a URL requires credentials authorized to sign a URL. To use the same +authentication that was used when instantiating the Storage client, use +[BucketHandle.SignedURL]. + + url, err := client.Bucket(bucketName).SignedURL(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +You can also sign a URL without creating a client. See the documentation of +[SignedURL] for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +# Post Policy V4 Signed Request + +A type of signed request that allows uploads through HTML forms directly to Cloud Storage with +temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised +by a user. + +For more information, please see the [XML POST Object docs] as well +as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. 
+ + pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + +# Credential requirements for signing + +If the GoogleAccessID and PrivateKey option fields are not provided, they will +be automatically detected by [BucketHandle.SignedURL] and +[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: + - you are authenticated to the Storage Client with a service account's + downloaded private key, either directly in code or by setting the + GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), + - your application is running on Google Compute Engine (GCE), or + - you are logged into [gcloud using application default credentials] + with [impersonation enabled]. + +Detecting GoogleAccessID may not be possible if you are authenticated using a +token source or using [option.WithHTTPClient]. In this case, you can provide a +service account email for GoogleAccessID and the client will attempt to sign +the URL or Post Policy using that service account. + +To generate the signature, you must have: + - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service + account, and + - the [IAM Service Account Credentials API] enabled (unless authenticating + with a downloaded private key). + +# Errors + +Errors returned by this client are often of the type [googleapi.Error]. +These errors can be introspected for more information by using [errors.As] +with the richer [googleapi.Error] type. For example: + + var e *googleapi.Error + if ok := errors.As(err, &e); ok { + if e.Code == 409 { ... } + } + +# Retrying failed requests + +Methods in this package may retry calls that fail with transient errors. +Retrying continues indefinitely unless the controlling context is canceled, the +client is closed, or a non-transient error is received. To stop retries from +continuing, use context timeouts or cancellation. + +The retry strategy in this library follows best practices for Cloud Storage. By +default, operations are retried only if they are idempotent, and exponential +backoff with jitter is employed. In addition, errors are only retried if they +are defined as transient by the service. See the [Cloud Storage retry docs] +for more information. + +Users can configure non-default retry behavior for a single library call (using +[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a +client (using [Client.SetRetry]). For example: + + o := client.Bucket(bucket).Object(object).Retryer( + // Use WithBackoff to change the timing of the exponential backoff. + storage.WithBackoff(gax.Backoff{ + Initial: 2 * time.Second, + }), + // Use WithPolicy to configure the idempotency policy. RetryAlways will + // retry the operation even if it is non-idempotent. + storage.WithPolicy(storage.RetryAlways), + ) + + // Use a context timeout to set an overall deadline on the call, including all + // potential retries. + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + // Delete an object using the specified strategy and timeout. + if err := o.Delete(ctx); err != nil { + // Handle err. + } + +# Sending Custom Headers + +You can add custom headers to any API call made by this package by using +[callctx.SetHeaders] on the context which is passed to the method. 
For example, +to add a [custom audit logging] header: + + ctx := context.Background() + ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-", "") + // Use client as usual with the context and the additional headers will be sent. + client.Bucket("my-bucket").Attrs(ctx) + +# Experimental gRPC API + +This package includes support for the Cloud Storage gRPC API, which is currently +in preview. This implementation uses gRPC rather than the current JSON & XML +APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC +team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to +allowlist to access this API. The Go Storage gRPC library is not yet generally +available, so it may be subject to breaking changes. + +To create a client which will use gRPC, use the alternate constructor: + + ctx := context.Background() + client, err := storage.NewGRPCClient(ctx) + if err != nil { + // TODO: Handle error. + } + // Use client as usual. + +If the application is running within GCP, users may get better performance by +enabling Direct Google Access (enabling requests to skip some proxy steps). To enable, +set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add +the following side-effect imports to your application: + + import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" + ) + +# Storage Control API + +Certain control plane and long-running operations for Cloud Storage (including Folder +and Managed Folder operations) are supported via the autogenerated Storage Control +client, which is available as a subpackage in this module. See package docs at +[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs. + +[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam +[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object +[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy +[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth +[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login +[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account +[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview +[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata +[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2 +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh new file mode 100644 index 000000000..7bad7cf39 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/emulator_test.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License.. + +# Fail on any error +set -eo pipefail + +# Display commands being run +set -x + +# Only run on Go 1.17+ +min_minor_ver=17 + +v=`go version | { read _ _ v _; echo ${v#go}; }` +comps=(${v//./ }) +minor_ver=${comps[1]} + +if [ "$minor_ver" -lt "$min_minor_ver" ]; then + echo minor version $minor_ver, skipping + exit 0 +fi + +export STORAGE_EMULATOR_HOST="http://localhost:9000" +export STORAGE_EMULATOR_HOST_GRPC="localhost:8888" + +DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench' +DEFAULT_IMAGE_TAG='latest' +DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG} +CONTAINER_NAME=storage_testbench + +# Note: --net=host makes the container bind directly to the Docker host’s network, +# with no network isolation. If we were to use port-mapping instead, reset connection errors +# would be captured differently and cause unexpected test behaviour. +# The host networking driver works only on Linux hosts. +# See more about using host networking: https://docs.docker.com/network/host/ +DOCKER_NETWORK="--net=host" +# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to +# differences in the network errors emitted by the system. +if [ `go env GOOS` == 'darwin' ]; then + DOCKER_NETWORK="-p 9000:9000 -p 8888:8888" +fi + +# Get the docker image for the testbench +docker pull $DOCKER_IMAGE + +# Start the testbench + +docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE +echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST" +sleep 1 + +# Stop the testbench & cleanup environment variables +function cleanup() { + echo "Cleanup testbench" + docker stop $CONTAINER_NAME + unset STORAGE_EMULATOR_HOST; + unset STORAGE_EMULATOR_HOST_GRPC; +} +trap cleanup EXIT + +# Check that the server is running - retry several times to allow for start-up time +response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null) + +if [[ $response != 200 ]] +then + echo "Testbench server did not start correctly" + exit 1 +fi + +# Start the gRPC server on port 8888. +echo "Starting the gRPC server on port 8888" +response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888") + +if [[ $response != 200 ]] +then + echo "Testbench gRPC server did not start correctly" + exit 1 +fi + +# Run tests +go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go new file mode 100644 index 000000000..d81a17b6b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -0,0 +1,2289 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "hash/crc32" + "io" + "net/url" + "os" + + "cloud.google.com/go/iam/apiv1/iampb" + "cloud.google.com/go/internal/trace" + gapic "cloud.google.com/go/storage/internal/apiv2" + "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +const ( + // defaultConnPoolSize is the default number of channels + // to initialize in the GAPIC gRPC connection pool. A larger + // connection pool may be necessary for jobs that require + // high throughput and/or leverage many concurrent streams + // if not running via DirectPath. + // + // This is only used for the gRPC client. + defaultConnPoolSize = 1 + + // maxPerMessageWriteSize is the maximum amount of content that can be sent + // per WriteObjectRequest message. A buffer reaching this amount will + // precipitate a flush of the buffer. It is only used by the gRPC Writer + // implementation. + maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) + + // globalProjectAlias is the project ID alias used for global buckets. + // + // This is only used for the gRPC API. + globalProjectAlias = "_" + + // msgEntityNotSupported indicates ACL entites using project ID are not currently supported. + // + // This is only used for the gRPC API. + msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" +) + +// defaultGRPCOptions returns a set of the default client options +// for gRPC client initialization. +func defaultGRPCOptions() []option.ClientOption { + defaults := []option.ClientOption{ + option.WithGRPCConnectionPool(defaultConnPoolSize), + } + + // Set emulator options for gRPC if an emulator was specified. Note that in a + // hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and + // STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a + // local emulator, HTTP and gRPC must use different ports, so this is + // necessary). + // + // TODO: When the newHybridClient is not longer used, remove + // STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the + // HTTP and gRPC based clients. + if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" { + // Strip the scheme from the emulator host. WithEndpoint does not take a + // scheme for gRPC. + host = stripScheme(host) + + defaults = append(defaults, + option.WithEndpoint(host), + option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithoutAuthentication(), + ) + } else { + // Only enable DirectPath when the emulator is not being targeted. + defaults = append(defaults, internaloption.EnableDirectPath(true)) + } + + return defaults +} + +// grpcStorageClient is the gRPC API implementation of the transport-agnostic +// storageClient interface. +type grpcStorageClient struct { + raw *gapic.Client + settings *settings +} + +// newGRPCStorageClient initializes a new storageClient that uses the gRPC +// Storage API. 
+func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + // Disable all gax-level retries in favor of retry logic in the veneer client. + s.gax = append(s.gax, gax.WithRetry(nil)) + + config := newStorageConfig(s.clientOption...) + if config.readAPIWasSet { + return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") + } + + g, err := gapic.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, err + } + + return &grpcStorageClient{ + raw: g, + settings: s, + }, nil +} + +func (c *grpcStorageClient) Close() error { + return c.raw.Close() +} + +// Top-level methods. + +func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetServiceAccountRequest{ + Project: toProjectResource(project), + } + var resp *storagepb.ServiceAccount + err := run(ctx, func(ctx context.Context) error { + var err error + resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + return resp.EmailAddress, err +} + +func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { + if enableObjectRetention != nil { + // TO-DO: implement ObjectRetention once available - see b/308194853 + return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") + } + + s := callSettings(c.settings, opts...) + b := attrs.toProtoBucket() + b.Project = toProjectResource(project) + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if b.GetLocation() == "" && b.GetLifecycle() != nil { + b.Location = "US" + } + + req := &storagepb.CreateBucketRequest{ + Parent: fmt.Sprintf("projects/%s", globalProjectAlias), + Bucket: b, + BucketId: bucket, + } + if attrs != nil { + req.PredefinedAcl = attrs.PredefinedACL + req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL + } + + var battrs *BucketAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.CreateBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent) + + return battrs, err +} + +func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) + it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + var gitr *gapic.BucketIterator + fetch := func(pageSize int, pageToken string) (token string, err error) { + + var buckets []*storagepb.Bucket + var next string + err = run(it.ctx, func(ctx context.Context) error { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. + if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(ctx, req, s.gax...) 
+ } + buckets, next, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + + for _, bkt := range buckets { + b := newBucketFromProto(bkt) + it.buckets = append(it.buckets, b) + } + + return next, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + return run(ctx, func(ctx context.Context) error { + return c.raw.DeleteBucket(ctx, req, s.gax...) + }, s.retry, s.idempotent) +} + +func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetBucketRequest{ + Name: bucketResourceName(globalProjectAlias, bucket), + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var battrs *BucketAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.GetBucket(ctx, req, s.gax...) + + battrs = newBucketFromProto(res) + + return err + }, s.retry, s.idempotent) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrBucketNotExist + } + + return battrs, err +} +func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) 
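+ // Build the proto bucket and an explicit update FieldMask from the
+ // attributes that were set. If the resulting mask turns out to be empty,
+ // the code below falls back to a plain GetBucket call, mirroring the
+ // behavior of JSON bucket updates for no-op changes.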
+ b := uattrs.toProtoBucket() + b.Name = bucketResourceName(globalProjectAlias, bucket) + req := &storagepb.UpdateBucketRequest{ + Bucket: b, + PredefinedAcl: uattrs.PredefinedACL, + PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL, + } + if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if uattrs.CORS != nil { + fieldMask.Paths = append(fieldMask.Paths, "cors") + } + if uattrs.DefaultEventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold") + } + if uattrs.RetentionPolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "retention_policy") + } + if uattrs.VersioningEnabled != nil { + fieldMask.Paths = append(fieldMask.Paths, "versioning") + } + if uattrs.RequesterPays != nil { + fieldMask.Paths = append(fieldMask.Paths, "billing") + } + if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown { + fieldMask.Paths = append(fieldMask.Paths, "iam_config") + } + if uattrs.Encryption != nil { + fieldMask.Paths = append(fieldMask.Paths, "encryption") + } + if uattrs.Lifecycle != nil { + fieldMask.Paths = append(fieldMask.Paths, "lifecycle") + } + if uattrs.Logging != nil { + fieldMask.Paths = append(fieldMask.Paths, "logging") + } + if uattrs.Website != nil { + fieldMask.Paths = append(fieldMask.Paths, "website") + } + if uattrs.PredefinedACL != "" { + // In cases where PredefinedACL is set, Acl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.PredefinedDefaultObjectACL != "" { + // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.acl != nil { + // In cases where acl is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + if uattrs.defaultObjectACL != nil { + // In cases where defaultObjectACL is set by UpdateBucketACL method. + fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") + } + if uattrs.StorageClass != "" { + fieldMask.Paths = append(fieldMask.Paths, "storage_class") + } + if uattrs.RPO != RPOUnknown { + fieldMask.Paths = append(fieldMask.Paths, "rpo") + } + if uattrs.Autoclass != nil { + fieldMask.Paths = append(fieldMask.Paths, "autoclass") + } + if uattrs.SoftDeletePolicy != nil { + fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy") + } + + for label := range uattrs.setLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + + // Delete a label by not including it in Bucket.Labels but adding the key to the update mask. + for label := range uattrs.deleteLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + + req.UpdateMask = fieldMask + + if len(fieldMask.Paths) < 1 { + // Nothing to update. Send a get request for current attrs instead. This + // maintains consistency with JSON bucket updates. + opts = append(opts, idempotent(true)) + return c.GetBucket(ctx, bucket, conds, opts...) + } + + var battrs *BucketAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.UpdateBucket(ctx, req, s.gax...) 
+ battrs = newBucketFromProto(res) + return err + }, s.retry, s.idempotent) + + return battrs, err +} +func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.LockBucketRetentionPolicyRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + } + if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil { + return err + } + + return run(ctx, func(ctx context.Context) error { + _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + +} +func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + req := &storagepb.ListObjectsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Prefix: it.query.Prefix, + Delimiter: it.query.Delimiter, + Versions: it.query.Versions, + LexicographicStart: it.query.StartOffset, + LexicographicEnd: it.query.EndOffset, + IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + MatchGlob: it.query.MatchGlob, + ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask + SoftDeleted: it.query.SoftDeleted, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + // IncludeFoldersAsPrefixes is not supported for gRPC + // TODO: remove this when support is added in the proto. + if it.query.IncludeFoldersAsPrefixes { + return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") + } + var objects []*storagepb.Object + var gitr *gapic.ObjectIterator + err = run(it.ctx, func(ctx context.Context) error { + gitr = c.raw.ListObjects(ctx, req, s.gax...) + it.ctx = ctx + objects, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent) + if err != nil { + if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { + err = ErrBucketNotExist + } + return "", err + } + + for _, obj := range objects { + b := newObjectFromProto(obj) + it.items = append(it.items, b) + } + + // Response is always non-nil after a successful request. + res := gitr.Response.(*storagepb.ListObjectsResponse) + for _, prefix := range res.GetPrefixes() { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + } + if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + err := run(ctx, func(ctx context.Context) error { + return c.raw.DeleteObject(ctx, req, s.gax...) 
+ }, s.retry, s.idempotent) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + return err +} + +func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, + // ProjectionFull by default. + ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + } + if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if params.encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey) + } + if params.softDeleted { + req.SoftDeleted = ¶ms.softDeleted + } + + var attrs *ObjectAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.GetObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + + return err + }, s.retry, s.idempotent) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + uattrs := params.uattrs + if params.overrideRetention != nil || uattrs.Retention != nil { + // TO-DO: implement ObjectRetention once available - see b/308194853 + return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") + } + s := callSettings(c.settings, opts...) + o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, params.bucket), params.object) + // For Update, generation is passed via the object message rather than a field on the request. + if params.gen >= 0 { + o.Generation = params.gen + } + req := &storagepb.UpdateObjectRequest{ + Object: o, + PredefinedAcl: uattrs.PredefinedACL, + } + if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if params.encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey) + } + + fieldMask := &fieldmaskpb.FieldMask{Paths: nil} + if uattrs.EventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") + } + if uattrs.TemporaryHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") + } + if uattrs.ContentType != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_type") + } + if uattrs.ContentLanguage != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_language") + } + if uattrs.ContentEncoding != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_encoding") + } + if uattrs.ContentDisposition != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_disposition") + } + if uattrs.CacheControl != nil { + fieldMask.Paths = append(fieldMask.Paths, "cache_control") + } + if !uattrs.CustomTime.IsZero() { + fieldMask.Paths = append(fieldMask.Paths, "custom_time") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. 
+ if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + + if uattrs.Metadata != nil { + // We don't support deleting a specific metadata key; metadata is deleted + // as a whole if provided an empty map, so we do not use dot notation here + if len(uattrs.Metadata) == 0 { + fieldMask.Paths = append(fieldMask.Paths, "metadata") + } else { + // We can, however, use dot notation for adding keys + for key := range uattrs.Metadata { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("metadata.%s", key)) + } + } + } + + req.UpdateMask = fieldMask + + if len(fieldMask.Paths) < 1 { + // Nothing to update. To maintain consistency with JSON, we must still + // update the object because metageneration and other fields are + // updated even on an empty update. + // gRPC will fail if the fieldmask is empty, so instead we add an + // output-only field to the update mask. Output-only fields are (and must + // be - see AIP 161) ignored, but allow us to send an empty update because + // any mask that is valid for read (as this one is) must be valid for write. + fieldMask.Paths = append(fieldMask.Paths, "create_time") + } + + var attrs *ObjectAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.UpdateObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent) + if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err +} + +func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.RestoreObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, params.bucket), + Object: params.object, + CopySourceAcl: ¶ms.copySourceACL, + } + if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + var attrs *ObjectAttrs + err := run(ctx, func(ctx context.Context) error { + res, err := c.raw.RestoreObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + return attrs, err +} + +// Default Object ACL methods. + +func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.DefaultObjectACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. 
+ if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.DefaultObjectACL, nil +} + +func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.DefaultObjectACL, aclRule) + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Bucket ACL methods. + +func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return nil, err + } + return attrs.ACL, nil +} + +func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &BucketAttrsToUpdate{acl: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. 
+ if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil +} + +// Object ACL methods. + +func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}} + if _, err = c.UpdateObject(ctx, params, opts...); err != nil { + return err + } + return nil +} + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) + if err != nil { + return nil, err + } + return o.ACL, nil +} + +func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}} + if _, err = c.UpdateObject(ctx, params, opts...); err != nil { + return err + } + return nil +} + +// Media operations. + +func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
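+ // Translate the destination attributes and each source object (with its
+ // own preconditions) into proto form, then issue a single ComposeObject
+ // RPC under the caller-provided retry and idempotency settings.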
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) + dstObjPb.Name = req.dstObject.name + + if req.sendCRC32C { + dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C + } + + srcs := []*storagepb.ComposeObjectRequest_SourceObject{} + for _, src := range req.srcs { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name, ObjectPreconditions: &storagepb.ComposeObjectRequest_SourceObject_ObjectPreconditions{}} + if src.gen >= 0 { + srcObjPb.Generation = src.gen + } + if err := applyCondsProto("ComposeObject source", defaultGen, src.conds, srcObjPb.ObjectPreconditions); err != nil { + return nil, err + } + srcs = append(srcs, srcObjPb) + } + + rawReq := &storagepb.ComposeObjectRequest{ + Destination: dstObjPb, + SourceObjects: srcs, + } + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, rawReq); err != nil { + return nil, err + } + if req.predefinedACL != "" { + rawReq.DestinationPredefinedAcl = req.predefinedACL + } + if req.dstObject.encryptionKey != nil { + rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + + var obj *storagepb.Object + var err error + if err := run(ctx, func(ctx context.Context) error { + obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) + return err + }, s.retry, s.idempotent); err != nil { + return nil, err + } + + return newObjectFromProto(obj), nil +} +func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) + obj := req.dstObject.attrs.toProtoObject("") + call := &storagepb.RewriteObjectRequest{ + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), + } + + // The userProject, whether source or destination project, is decided by the code calling the interface. 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + + if len(req.dstObject.encryptionKey) > 0 { + call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + if len(req.srcObject.encryptionKey) > 0 { + srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) + call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() + call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() + call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() + } + + call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall + + var res *storagepb.RewriteResponse + var err error + + retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.GetDone(), + written: res.GetTotalBytesRewritten(), + size: res.GetObjectSize(), + token: res.GetRewriteToken(), + resource: newObjectFromProto(res.GetResource()), + } + + return r, nil +} + +// bytesCodec is a grpc codec which permits receiving messages as either +// protobuf messages, or as raw []bytes. +type bytesCodec struct { + encoding.Codec +} + +func (bytesCodec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (bytesCodec) Unmarshal(data []byte, v any) error { + switch v := v.(type) { + case *[]byte: + // If gRPC could recycle the data []byte after unmarshaling (through + // buffer pools), we would need to make a copy here. + *v = data + return nil + case proto.Message: + return proto.Unmarshal(data, v) + default: + return fmt.Errorf("can not unmarshal type %T", v) + } +} + +func (bytesCodec) Name() string { + // If this isn't "", then gRPC sets the content-subtype of the call to this + // value and we get errors. + return "" +} + +func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + s.gax = append(s.gax, gax.WithGRPCOptions( + grpc.ForceCodec(bytesCodec{}), + )) + + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + b := bucketResourceName(globalProjectAlias, params.bucket) + req := &storagepb.ReadObjectRequest{ + Bucket: b, + Object: params.object, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), + } + // The default is a negative value, which means latest. + if params.gen >= 0 { + req.Generation = params.gen + } + + var databuf []byte + + // Define a function that initiates a Read with offset and length, assuming + // we have already read seen bytes. + reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { + // If the context has already expired, return immediately without making + // we call. 
+ if err := ctx.Err(); err != nil { + return nil, nil, err + } + + cc, cancel := context.WithCancel(ctx) + + req.ReadOffset = params.offset + seen + + // Only set a ReadLimit if length is greater than zero, because <= 0 means + // to read it all. + if params.length > 0 { + req.ReadLimit = params.length - seen + } + + if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { + cancel() + return nil, nil, err + } + + var stream storagepb.Storage_ReadObjectClient + var msg *storagepb.ReadObjectResponse + var err error + + err = run(cc, func(ctx context.Context) error { + stream, err = c.raw.ReadObject(cc, req, s.gax...) + if err != nil { + return err + } + + // Receive the message into databuf as a wire-encoded message so we can + // use a custom decoder to avoid an extra copy at the protobuf layer. + err := stream.RecvMsg(&databuf) + // These types of errors show up on the Recv call, rather than the + // initialization of the stream via ReadObject above. + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + if err != nil { + return err + } + // Use a custom decoder that uses protobuf unmarshalling for all + // fields except the checksummed data. + // Subsequent receives in Read calls will skip all protobuf + // unmarshalling and directly read the content from the gRPC []byte + // response, since only the first call will contain other fields. + msg, err = readFullObjectResponse(databuf) + + return err + }, s.retry, s.idempotent) + if err != nil { + // Close the stream context we just created to ensure we don't leak + // resources. + cancel() + return nil, nil, err + } + + return &readStreamResponse{stream, msg}, cancel, nil + } + + res, cancel, err := reopen(0) + if err != nil { + return nil, err + } + + // The first message was Recv'd on stream open, use it to populate the + // object metadata. + msg := res.response + obj := msg.GetMetadata() + // This is the size of the entire object, even if only a range was requested. + size := obj.GetSize() + + // Only support checksums when reading an entire object, not a range. + var ( + wantCRC uint32 + checkCRC bool + ) + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + wantCRC = checksums.GetCrc32C() + checkCRC = true + } + + r = &Reader{ + Attrs: ReaderObjectAttrs{ + Size: size, + ContentType: obj.GetContentType(), + ContentEncoding: obj.GetContentEncoding(), + CacheControl: obj.GetCacheControl(), + LastModified: obj.GetUpdateTime().AsTime(), + Metageneration: obj.GetMetageneration(), + Generation: obj.GetGeneration(), + }, + reader: &gRPCReader{ + stream: res.stream, + reopen: reopen, + cancel: cancel, + size: size, + // Store the content from the first Recv in the + // client buffer for reading later. + leftovers: msg.GetChecksummedData().GetContent(), + settings: s, + zeroRange: params.length == 0, + databuf: databuf, + wantCRC: wantCRC, + checkCRC: checkCRC, + }, + checkCRC: checkCRC, + } + + cr := msg.GetContentRange() + if cr != nil { + r.Attrs.StartOffset = cr.GetStart() + r.remain = cr.GetEnd() - cr.GetStart() + } else { + r.remain = size + } + + // For a zero-length request, explicitly close the stream and set remaining + // bytes to zero. 
+ if params.length == 0 { + r.remain = 0 + r.reader.Close() + } + + return r, nil +} + +func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + + var offset int64 + errorf := params.setError + progress := params.progress + setObj := params.setObj + + pr, pw := io.Pipe() + gw := newGRPCWriter(c, params, pr) + gw.settings = s + if s.userProject != "" { + gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) + } + + // This function reads the data sent to the pipe and sends sets of messages + // on the gRPC client-stream as the buffer is filled. + go func() { + defer close(params.donec) + + // Loop until there is an error or the Object has been finalized. + for { + // Note: This blocks until either the buffer is full or EOF is read. + recvd, doneReading, err := gw.read() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + + if params.attrs.Retention != nil { + // TO-DO: remove once ObjectRetention is available - see b/308194853 + err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") + errorf(err) + pr.CloseWithError(err) + return + } + // The chunk buffer is full, but there is no end in sight. This + // means that either: + // 1. A resumable upload will need to be used to send + // multiple chunks, until we are done reading data. Start a + // resumable upload if it has not already been started. + // 2. ChunkSize of zero may also have a full buffer, but a resumable + // session should not be initiated in this case. + if !doneReading && gw.upid == "" && params.chunkSize != 0 { + err = gw.startResumableUpload() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + } + + o, off, err := gw.uploadBuffer(recvd, offset, doneReading) + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + + // At this point, the current buffer has been uploaded. For resumable + // uploads and chunkSize = 0, capture the committed offset here in case + // the upload was not finalized and another chunk is to be uploaded. Call + // the progress function for resumable uploads only. + if gw.upid != "" || gw.chunkSize == 0 { + offset = off + } + if gw.upid != "" { + progress(offset) + } + + // When we are done reading data without errors, set the object and + // finish. + if doneReading { + // Build Object from server's response. + setObj(newObjectFromProto(o)) + return + } + } + }() + + return pw, nil +} + +// IAM methods. + +func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.GetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Options: &iampb.GetPolicyOptions{ + RequestedPolicyVersion: version, + }, + } + var rp *iampb.Policy + err := run(ctx, func(ctx context.Context) error { + var err error + rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + + return rp, err +} + +func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) 
+ + req := &iampb.SetIamPolicyRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Policy: policy, + } + + return run(ctx, func(ctx context.Context) error { + _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) +} + +func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. + s := callSettings(c.settings, opts...) + req := &iampb.TestIamPermissionsRequest{ + Resource: bucketResourceName(globalProjectAlias, resource), + Permissions: permissions, + } + var res *iampb.TestIamPermissionsResponse + err := run(ctx, func(ctx context.Context) error { + var err error + res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func(ctx context.Context) error { + var err error + metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) + req := &storagepb.ListHmacKeysRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + ShowDeletedKeys: showDeletedKeys, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + it := &HMACKeysIterator{ + ctx: ctx, + projectID: project, + retry: s.retry, + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + var hmacKeys []*storagepb.HmacKeyMetadata + err = run(it.ctx, func(ctx context.Context) error { + gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) + hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + for _, hkmd := range hmacKeys { + hk := toHMACKeyFromProto(hkmd) + it.hmacKeys = append(it.hmacKeys, hk) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) 
+ hk := &storagepb.HmacKeyMetadata{ + AccessId: accessID, + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + State: string(attrs.State), + Etag: attrs.Etag, + } + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if attrs.State != "" { + fieldMask.Paths = append(fieldMask.Paths, "state") + } + req := &storagepb.UpdateHmacKeyRequest{ + HmacKey: hk, + UpdateMask: fieldMask, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func(ctx context.Context) error { + var err error + metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.CreateHmacKeyRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var res *storagepb.CreateHmacKeyResponse + err := run(ctx, func(ctx context.Context) error { + var err error + res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + key := toHMACKeyFromProto(res.Metadata) + key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) + + return key, nil +} + +func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + return run(ctx, func(ctx context.Context) error { + return c.raw.DeleteHmacKey(ctx, req, s.gax...) + }, s.retry, s.idempotent) +} + +// Notification methods. + +func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + req := &storagepb.ListNotificationConfigsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + } + var notifications []*storagepb.NotificationConfig + err = run(ctx, func(ctx context.Context) error { + gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) + for { + // PageSize is not set and fallbacks to the API default pageSize of 100. + items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) + if err != nil { + return err + } + notifications = append(notifications, items...) + // If there are no more results, nextPageToken is empty and err is nil. 
+ if nextPageToken == "" { + return err + } + req.PageToken = nextPageToken + } + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + + return notificationsToMapFromProto(notifications), nil +} + +func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.CreateNotificationConfigRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + NotificationConfig: toProtoNotification(n), + } + var pbn *storagepb.NotificationConfig + err = run(ctx, func(ctx context.Context) error { + var err error + pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return toNotificationFromProto(pbn), err +} + +func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteNotificationConfigRequest{Name: id} + return run(ctx, func(ctx context.Context) error { + return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) + }, s.retry, s.idempotent) +} + +// setUserProjectMetadata appends a project ID to the outgoing Context metadata +// via the x-goog-user-project system parameter defined at +// https://cloud.google.com/apis/docs/system-parameters. This is only for +// billing purposes, and is generally optional, except for requester-pays +// buckets. +func setUserProjectMetadata(ctx context.Context, project string) context.Context { + return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) +} + +type readStreamResponse struct { + stream storagepb.Storage_ReadObjectClient + response *storagepb.ReadObjectResponse +} + +type gRPCReader struct { + seen, size int64 + zeroRange bool + stream storagepb.Storage_ReadObjectClient + reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) + leftovers []byte + databuf []byte + cancel context.CancelFunc + settings *settings + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +// Update the running CRC with the data in the slice, if CRC checking was enabled. +func (r *gRPCReader) updateCRC(b []byte) { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b) + } +} + +// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled. +func (r *gRPCReader) runCRCCheck() error { + if r.checkCRC && r.gotCRC != r.wantCRC { + return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC) + } + return nil +} + +// Read reads bytes into the user's buffer from an open gRPC stream. +func (r *gRPCReader) Read(p []byte) (int, error) { + // The entire object has been read by this reader, check the checksum if + // necessary and return EOF. + if r.size == r.seen || r.zeroRange { + if err := r.runCRCCheck(); err != nil { + return 0, err + } + return 0, io.EOF + } + + // No stream to read from, either never initialized or Close was called. 
+	// Note: There is a potential concurrency issue if multiple routines are
+	// using the same reader. One encounters an error and the stream is closed
+	// and then reopened while the other routine attempts to read from it.
+	if r.stream == nil {
+		return 0, fmt.Errorf("storage: reader has been closed")
+	}
+
+	var n int
+	// Read leftovers and return what was available to conform to the Reader
+	// interface: https://pkg.go.dev/io#Reader.
+	if len(r.leftovers) > 0 {
+		n = copy(p, r.leftovers)
+		r.seen += int64(n)
+		r.updateCRC(p[:n])
+		r.leftovers = r.leftovers[n:]
+		return n, nil
+	}
+
+	// Attempt to Recv the next message on the stream.
+	content, err := r.recv()
+	if err != nil {
+		return 0, err
+	}
+
+	// TODO: Determine if we need to capture incremental CRC32C for this
+	// chunk. The Object CRC32C checksum is captured when directed to read
+	// the entire Object. If directed to read a range, we may need to
+	// calculate the range's checksum for verification if the checksum is
+	// present in the response here.
+	// TODO: Figure out if we need to support decompressive transcoding
+	// https://cloud.google.com/storage/docs/transcoding.
+	n = copy(p[n:], content)
+	leftover := len(content) - n
+	if leftover > 0 {
+		// Wasn't able to copy all of the data in the message, store for
+		// future Read calls.
+		r.leftovers = content[n:]
+	}
+	r.seen += int64(n)
+	r.updateCRC(p[:n])
+
+	return n, nil
+}
+
+// WriteTo writes all the data requested by the Reader into w, implementing
+// io.WriterTo.
+func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
+	// The entire object has been read by this reader, check the checksum if
+	// necessary and return nil.
+	if r.size == r.seen || r.zeroRange {
+		if err := r.runCRCCheck(); err != nil {
+			return 0, err
+		}
+		return 0, nil
+	}
+
+	// No stream to read from, either never initialized or Close was called.
+	// Note: There is a potential concurrency issue if multiple routines are
+	// using the same reader. One encounters an error and the stream is closed
+	// and then reopened while the other routine attempts to read from it.
+	if r.stream == nil {
+		return 0, fmt.Errorf("storage: reader has been closed")
+	}
+
+	// Track the bytes already written before this call.
+	var alreadySeen = r.seen
+
+	// Write any leftovers to the writer. There will be some leftovers from the
+	// original NewRangeReader call.
+	if len(r.leftovers) > 0 {
+		// Write() will write the entire leftovers slice unless there is an error.
+		written, err := w.Write(r.leftovers)
+		r.seen += int64(written)
+		r.updateCRC(r.leftovers)
+		r.leftovers = nil
+		if err != nil {
+			return r.seen - alreadySeen, err
+		}
+	}
+
+	// Loop and receive additional messages until the entire data is written.
+	for {
+		// Attempt to receive the next message on the stream.
+		// Will terminate with io.EOF once data has all come through.
+		// recv() handles stream reopening and retry logic so no need for retries here.
+		msg, err := r.recv()
+		if err != nil {
+			if err == io.EOF {
+				// We are done; check the checksum if necessary and return.
+				err = r.runCRCCheck()
+			}
+			return r.seen - alreadySeen, err
+		}
+
+		// TODO: Determine if we need to capture incremental CRC32C for this
+		// chunk. The Object CRC32C checksum is captured when directed to read
+		// the entire Object. If directed to read a range, we may need to
+		// calculate the range's checksum for verification if the checksum is
+		// present in the response here.
+ // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + written, err := w.Write(msg) + r.seen += int64(written) + r.updateCRC(msg) + if err != nil { + return r.seen - alreadySeen, err + } + } + +} + +// Close cancels the read stream's context in order for it to be closed and +// collected. +func (r *gRPCReader) Close() error { + if r.cancel != nil { + r.cancel() + } + r.stream = nil + return nil +} + +// recv attempts to Recv the next message on the stream and extract the object +// data that it contains. In the event that a retryable error is encountered, +// the stream will be closed, reopened, and RecvMsg again. +// This will attempt to Recv until one of the following is true: +// +// * Recv is successful +// * A non-retryable error is encountered +// * The Reader's context is canceled +// +// The last error received is the one that is returned, which could be from +// an attempt to reopen the stream. +func (r *gRPCReader) recv() ([]byte, error) { + err := r.stream.RecvMsg(&r.databuf) + + var shouldRetry = ShouldRetry + if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { + shouldRetry = r.settings.retry.shouldRetry + } + if err != nil && shouldRetry(err) { + // This will "close" the existing stream and immediately attempt to + // reopen the stream, but will backoff if further attempts are necessary. + // Reopening the stream Recvs the first message, so if retrying is + // successful, the next logical chunk will be returned. + msg, err := r.reopenStream() + return msg.GetChecksummedData().GetContent(), err + } + + if err != nil { + return nil, err + } + + return readObjectResponseContent(r.databuf) +} + +// ReadObjectResponse field and subfield numbers. +const ( + checksummedDataField = protowire.Number(1) + checksummedDataContentField = protowire.Number(1) + checksummedDataCRC32CField = protowire.Number(2) + objectChecksumsField = protowire.Number(2) + contentRangeField = protowire.Number(3) + metadataField = protowire.Number(4) +) + +// readObjectResponseContent returns the checksummed_data.content field of a +// ReadObjectResponse message, or an error if the message is invalid. +// This can be used on recvs of objects after the first recv, since only the +// first message will contain non-data fields. +func readObjectResponseContent(b []byte) ([]byte, error) { + checksummedData, err := readProtoBytes(b, checksummedDataField) + if err != nil { + return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + } + content, err := readProtoBytes(checksummedData, checksummedDataContentField) + if err != nil { + return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) + } + + return content, nil +} + +// readFullObjectResponse returns the ReadObjectResponse that is encoded in the +// wire-encoded message buffer b, or an error if the message is invalid. +// This must be used on the first recv of an object as it may contain all fields +// of ReadObjectResponse, and we use or pass on those fields to the user. +// This function is essentially identical to proto.Unmarshal, except it aliases +// the data in the input []byte. If the proto library adds a feature to +// Unmarshal that does that, this function can be dropped. +func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { + msg := &storagepb.ReadObjectResponse{} + + // Loop over the entire message, extracting fields as we go. 
This does not + // handle field concatenation, in which the contents of a single field + // are split across multiple protobuf tags. + off := 0 + for off < len(b) { + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) + if fieldLength < 0 { + return nil, protowire.ParseError(fieldLength) + } + off += fieldLength + + // Unmarshal the field according to its type. Only fields that are not + // nil will be present. + switch { + case fieldNum == checksummedDataField && fieldType == protowire.BytesType: + // The ChecksummedData field was found. Initialize the struct. + msg.ChecksummedData = &storagepb.ChecksummedData{} + + // Get the bytes corresponding to the checksummed data. + fieldContent, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + } + off += n + + // Get the nested fields. We need to do this manually as it contains + // the object content bytes. + contentOff := 0 + for contentOff < len(fieldContent) { + gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + + switch { + case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: + // Get the content bytes. + bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Content = bytes + contentOff += n + case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Crc32C = &v + contentOff += n + default: + n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + } + } + case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: + // The field was found. Initialize the struct. + msg.ObjectChecksums = &storagepb.ObjectChecksums{} + + // Get the bytes corresponding to the checksums. + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + } + off += n + + // Unmarshal. 
+			if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil {
+				return nil, err
+			}
+		case fieldNum == contentRangeField && fieldType == protowire.BytesType:
+			msg.ContentRange = &storagepb.ContentRange{}
+
+			bytes, n := protowire.ConsumeBytes(b[off:])
+			if n < 0 {
+				return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n))
+			}
+			off += n
+
+			if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil {
+				return nil, err
+			}
+		case fieldNum == metadataField && fieldType == protowire.BytesType:
+			msg.Metadata = &storagepb.Object{}
+
+			bytes, n := protowire.ConsumeBytes(b[off:])
+			if n < 0 {
+				return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n))
+			}
+			off += n
+
+			if err := proto.Unmarshal(bytes, msg.Metadata); err != nil {
+				return nil, err
+			}
+		default:
+			fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:])
+			if fieldLength < 0 {
+				return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength))
+			}
+			off += fieldLength
+		}
+	}
+
+	return msg, nil
+}
+
+// readProtoBytes returns the contents of the protobuf field with number num
+// and type bytes from a wire-encoded message. If the field cannot be found,
+// the returned slice will be nil and no error will be returned.
+//
+// It does not handle field concatenation, in which the contents of a single field
+// are split across multiple protobuf tags. Encoded data containing split fields
+// of this form is technically permissible, but uncommon.
+func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) {
+	off := 0
+	for off < len(b) {
+		gotNum, gotTyp, n := protowire.ConsumeTag(b[off:])
+		if n < 0 {
+			return nil, protowire.ParseError(n)
+		}
+		off += n
+		if gotNum == num && gotTyp == protowire.BytesType {
+			b, n := protowire.ConsumeBytes(b[off:])
+			if n < 0 {
+				return nil, protowire.ParseError(n)
+			}
+			return b, nil
+		}
+		n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:])
+		if n < 0 {
+			return nil, protowire.ParseError(n)
+		}
+		off += n
+	}
+	return nil, nil
+}
+
+// reopenStream "closes" the existing stream and attempts to reopen a stream and
+// sets the Reader's stream and cancelStream properties in the process.
+func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) {
+	// Close existing stream and initialize new stream with updated offset.
+	r.Close()
+
+	res, cancel, err := r.reopen(r.seen)
+	if err != nil {
+		return nil, err
+	}
+	r.stream = res.stream
+	r.cancel = cancel
+	return res.response, nil
+}
+
+func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter {
+	size := params.chunkSize
+
+	// Round up chunksize to nearest 256KiB
+	if size%googleapi.MinUploadChunkSize != 0 {
+		size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
+	}
+
+	// A completely bufferless upload is not possible as it is in JSON because
+	// the buffer must be provided to the message. However, use the minimum size
+	// possible in this case.
+	if params.chunkSize == 0 {
+		size = googleapi.MinUploadChunkSize
+	}
+
+	return &gRPCWriter{
+		buf:                   make([]byte, size),
+		c:                     c,
+		ctx:                   params.ctx,
+		reader:                r,
+		bucket:                params.bucket,
+		attrs:                 params.attrs,
+		conds:                 params.conds,
+		encryptionKey:         params.encryptionKey,
+		sendCRC32C:            params.sendCRC32C,
+		chunkSize:             params.chunkSize,
+		forceEmptyContentType: params.forceEmptyContentType,
+	}
+}
+
+// gRPCWriter is a wrapper around the gRPC client-stream API that manages
+// sending chunks of data provided by the user over the stream.
+type gRPCWriter struct {
+	c      *grpcStorageClient
+	buf    []byte
+	reader io.Reader
+
+	ctx context.Context
+
+	bucket        string
+	attrs         *ObjectAttrs
+	conds         *Conditions
+	encryptionKey []byte
+	settings      *settings
+
+	sendCRC32C            bool
+	chunkSize             int
+	forceEmptyContentType bool
+
+	// The gRPC client-stream used for sending buffers.
+	stream storagepb.Storage_BidiWriteObjectClient
+
+	// The Resumable Upload ID started by a gRPC-based Writer.
+	upid string
+}
+
+// startResumableUpload initializes a Resumable Upload with gRPC and sets the
+// upload ID on the Writer.
+func (w *gRPCWriter) startResumableUpload() error {
+	spec, err := w.writeObjectSpec()
+	if err != nil {
+		return err
+	}
+	req := &storagepb.StartResumableWriteRequest{
+		WriteObjectSpec:           spec,
+		CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+	}
+	// TODO: Currently the checksums are only sent on the request to initialize
+	// the upload, but in the future, we must also support sending it
+	// on the *last* message of the stream.
+	req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs)
+	return run(w.ctx, func(ctx context.Context) error {
+		upres, err := w.c.raw.StartResumableWrite(w.ctx, req)
+		w.upid = upres.GetUploadId()
+		return err
+	}, w.settings.retry, w.settings.idempotent)
+}
+
+// queryProgress is a helper that queries the status of the resumable upload
+// associated with the given upload ID.
+func (w *gRPCWriter) queryProgress() (int64, error) {
+	var persistedSize int64
+	err := run(w.ctx, func(ctx context.Context) error {
+		q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{
+			UploadId: w.upid,
+		})
+		persistedSize = q.GetPersistedSize()
+		return err
+	}, w.settings.retry, true)
+
+	// q.GetPersistedSize() will return 0 if q is nil.
+	return persistedSize, err
+}
+
+// uploadBuffer uploads the buffer at the given offset using a bi-directional
+// Write stream. It will open a new stream if necessary (on the first call or
+// after resuming from failure). The resulting write offset after uploading the
+// buffer is returned, as well as the final Object if the upload is
+// completed.
+//
+// Returns object, persisted size, and any error that is not retriable.
+func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) {
+	var shouldRetry = ShouldRetry
+	if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
+		shouldRetry = w.settings.retry.shouldRetry
+	}
+
+	var err error
+	var lastWriteOfEntireObject bool
+
+	sent := 0
+	writeOffset := start
+
+	toWrite := w.buf[:recvd]
+
+	// Send a request with as many bytes as possible.
+	// Loop until all bytes are sent.
+sendBytes: // label this loop so that we can use a continue statement from a nested block + for { + bytesNotYetSent := recvd - sent + remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize + + if remainingDataFitsInSingleReq && doneReading { + lastWriteOfEntireObject = true + } + + // Send the maximum amount of bytes we can, unless we don't have that many. + bytesToSendInCurrReq := maxPerMessageWriteSize + if remainingDataFitsInSingleReq { + bytesToSendInCurrReq = bytesNotYetSent + } + + // Prepare chunk section for upload. + data := toWrite[sent : sent+bytesToSendInCurrReq] + + req := &storagepb.BidiWriteObjectRequest{ + Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ + ChecksummedData: &storagepb.ChecksummedData{ + Content: data, + }, + }, + WriteOffset: writeOffset, + FinishWrite: lastWriteOfEntireObject, + Flush: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, + StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, + } + + // Open a new stream if necessary and set the first_message field on + // the request. The first message on the WriteObject stream must either + // be the Object or the Resumable Upload ID. + if w.stream == nil { + hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))} + ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...) + + w.stream, err = w.c.raw.BidiWriteObject(ctx) + if err != nil { + return nil, 0, err + } + + if w.upid != "" { // resumable upload + req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid} + } else { // non-resumable + spec, err := w.writeObjectSpec() + if err != nil { + return nil, 0, err + } + req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{ + WriteObjectSpec: spec, + } + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) + // For a non-resumable upload, checksums must be sent in this message. + // TODO: Currently the checksums are only sent on the first message + // of the stream, but in the future, we must also support sending it + // on the *last* message of the stream (instead of the first). + req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) + } + } + + err = w.stream.Send(req) + if err == io.EOF { + // err was io.EOF. The client-side of a stream only gets an EOF on Send + // when the backend closes the stream and wants to return an error + // status. + + // Receive from the stream Recv() until it returns a non-nil error + // to receive the server's status as an error. We may get multiple + // messages before the error due to buffering. + err = nil + for err == nil { + _, err = w.stream.Recv() + } + // Drop the stream reference as a new one will need to be created if + // we retry. + w.stream = nil + + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received. + if shouldRetry(err) { + // TODO: Add test case for failure modes of querying progress. + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + + // Continue sending requests, opening a new stream and resending + // any bytes not yet persisted as per QueryWriteStatus + continue sendBytes + } + } + if err != nil { + return nil, 0, err + } + + // Update the immediate stream's sent total and the upload offset with + // the data sent. 
+ sent += len(data) + writeOffset += int64(len(data)) + + // Not done sending data, do not attempt to commit it yet, loop around + // and send more data. + if recvd-sent > 0 { + continue sendBytes + } + + // The buffer has been uploaded and there is still more data to be + // uploaded, but this is not a resumable upload session. Therefore, + // don't check persisted data. + if !lastWriteOfEntireObject && w.chunkSize == 0 { + return nil, writeOffset, nil + } + + // Done sending the data in the buffer (remainingDataFitsInSingleReq + // should == true if we reach this code). + // If we are done sending the whole object, close the stream and get the final + // object. Otherwise, receive from the stream to confirm the persisted data. + if !lastWriteOfEntireObject { + resp, err := w.stream.Recv() + + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if shouldRetry(err) { + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + + // Drop the stream reference as a new one will need to be created. + w.stream = nil + + continue sendBytes + } + if err != nil { + return nil, 0, err + } + + if resp.GetPersistedSize() != writeOffset { + // Retry if not all bytes were persisted. + writeOffset = resp.GetPersistedSize() + sent = int(writeOffset) - int(start) + continue sendBytes + } + } else { + // If the object is done uploading, close the send stream to signal + // to the server that we are done sending so that we can receive + // from the stream without blocking. + err = w.stream.CloseSend() + if err != nil { + // CloseSend() retries the send internally. It never returns an + // error in the current implementation, but we check it anyway in + // case that it does in the future. + return nil, 0, err + } + + // Stream receives do not block once send is closed, but we may not + // receive the response with the object right away; loop until we + // receive the object or error out. + var obj *storagepb.Object + for obj == nil { + resp, err := w.stream.Recv() + if shouldRetry(err) { + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + w.stream = nil + continue sendBytes + } + if err != nil { + return nil, 0, err + } + + obj = resp.GetResource() + } + + // Even though we received the object response, continue reading + // until we receive a non-nil error, to ensure the stream does not + // leak even if the context isn't cancelled. See: + // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream + for err == nil { + _, err = w.stream.Recv() + } + + return obj, writeOffset, nil + } + + return nil, writeOffset, nil + } +} + +// determineOffset either returns the offset given to it in the case of a simple +// upload, or queries the write status in the case a resumable upload is being +// used. +func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { + // For a Resumable Upload, we must start from however much data + // was committed. + if w.upid != "" { + committed, err := w.queryProgress() + if err != nil { + return 0, err + } + offset = committed + } + return offset, nil +} + +// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's +// ObjectAttrs and applies its Conditions. This is only used for gRPC. 
+func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
+	// To avoid modifying the ObjectAttrs embedded in the calling writer, deref
+	// the ObjectAttrs pointer to make a copy, then assign the desired name to
+	// the attribute.
+	attrs := *w.attrs
+
+	spec := &storagepb.WriteObjectSpec{
+		Resource: attrs.toProtoObject(w.bucket),
+	}
+	// WriteObject doesn't support the generation condition, so use default.
+	if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
+		return nil, err
+	}
+	return spec, nil
+}
+
+// read copies the data in the reader to the given buffer and reports how much
+// data was read into the buffer and if there is no more data to read (EOF).
+// Furthermore, if the attrs.ContentType is unset, the first bytes of content
+// will be sniffed for a matching content type unless forceEmptyContentType is enabled.
+func (w *gRPCWriter) read() (int, bool, error) {
+	if w.attrs.ContentType == "" && !w.forceEmptyContentType {
+		w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
+	}
+	// Set n to -1 to start the Read loop.
+	var n, recvd int = -1, 0
+	var err error
+	for err == nil && n != 0 {
+		// The routine blocks here until data is received.
+		n, err = w.reader.Read(w.buf[recvd:])
+		recvd += n
+	}
+	var done bool
+	if err == io.EOF {
+		done = true
+		err = nil
+	}
+	return recvd, done, err
+}
+
+func checkCanceled(err error) error {
+	if status.Code(err) == codes.Canceled {
+		return context.Canceled
+	}
+
+	return err
+}
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
new file mode 100644
index 000000000..f7811a5d1
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -0,0 +1,356 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/storage/internal/apiv2/storagepb"
+	"google.golang.org/api/iterator"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// HMACState is the state of the HMAC key.
+type HMACState string
+
+const (
+	// Active is the status for an active key that can be used to sign
+	// requests.
+	Active HMACState = "ACTIVE"
+
+	// Inactive is the status for an inactive key; thus requests signed by
+	// this key will be denied.
+	Inactive HMACState = "INACTIVE"
+
+	// Deleted is the status for a key that is deleted.
+	// Once in this state the key cannot be recovered
+	// and does not count towards key limits. Deleted keys will be cleaned
+	// up later.
+	Deleted HMACState = "DELETED"
+)
+
+// HMACKey is the representation of a Google Cloud Storage HMAC key.
+//
+// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
+// authentication, please visit https://cloud.google.com/storage/docs/migrating.
+type HMACKey struct {
+	// The HMAC's secret key.
+	Secret string
+
+	// AccessID is the ID of the HMAC key.
+	AccessID string
+
+	// Etag is the HTTP/1.1 Entity tag.
+	Etag string
+
+	// ID is the ID of the HMAC key, including the ProjectID and AccessID.
+	ID string
+
+	// ProjectID is the ID of the project that owns the
+	// service account to which the key authenticates.
+	ProjectID string
+
+	// ServiceAccountEmail is the email address
+	// of the key's associated service account.
+	ServiceAccountEmail string
+
+	// CreatedTime is the creation time of the HMAC key.
+	CreatedTime time.Time
+
+	// UpdatedTime is the last modification time of the HMAC key metadata.
+	UpdatedTime time.Time
+
+	// State is the state of the HMAC key.
+	// It can be one of Active, Inactive or Deleted.
+	State HMACState
+}
+
+// HMACKeyHandle helps provide access and management for HMAC keys.
+type HMACKeyHandle struct {
+	projectID string
+	accessID  string
+	retry     *retryConfig
+	tc        storageClient
+}
+
+// HMACKeyHandle creates a handle that will be used for HMACKey operations.
+func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
+	return &HMACKeyHandle{
+		projectID: projectID,
+		accessID:  accessID,
+		retry:     c.retry,
+		tc:        c.tc,
+	}
+}
+
+// Get invokes an RPC to retrieve the HMAC key referenced by the
+// HMACKeyHandle's accessID.
+//
+// Options such as UserProjectForHMACKeys can be used to set the
+// userProject to be billed against for operations.
+func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
+	desc := new(hmacKeyDesc)
+	for _, opt := range opts {
+		opt.withHMACKeyDesc(desc)
+	}
+
+	o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
+	hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
+
+	return hk, err
+}
+
+// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
+// Only inactive HMAC keys can be deleted.
+// After deletion, a key cannot be used to authenticate requests.
+func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
+	desc := new(hmacKeyDesc)
+	for _, opt := range opts {
+		opt.withHMACKeyDesc(desc)
+	}
+
+	o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
+	return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
+}
+
+func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
+	hkmd := hk.Metadata
+	if hkmd == nil {
+		return nil, errors.New("field Metadata cannot be nil")
+	}
+	createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
+	if err != nil {
+		return nil, fmt.Errorf("field CreatedTime: %w", err)
+	}
+	updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
+	if err != nil && !updatedTimeCanBeNil {
+		return nil, fmt.Errorf("field UpdatedTime: %w", err)
+	}
+
+	hmKey := &HMACKey{
+		AccessID:    hkmd.AccessId,
+		Secret:      hk.Secret,
+		Etag:        hkmd.Etag,
+		ID:          hkmd.Id,
+		State:       HMACState(hkmd.State),
+		ProjectID:   hkmd.ProjectId,
+		CreatedTime: createdTime,
+		UpdatedTime: updatedTime,
+
+		ServiceAccountEmail: hkmd.ServiceAccountEmail,
+	}
+
+	return hmKey, nil
+}
+
+func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
+	if pbmd == nil {
+		return nil
+	}
+
+	return &HMACKey{
+		AccessID:            pbmd.GetAccessId(),
+		ID:                  pbmd.GetId(),
+		State:               HMACState(pbmd.GetState()),
+		ProjectID:           pbmd.GetProject(),
+		CreatedTime:         convertProtoTime(pbmd.GetCreateTime()),
+		UpdatedTime:         convertProtoTime(pbmd.GetUpdateTime()),
+		ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
+	}
+}
+
+// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
+func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
+	if projectID == "" {
+		return nil, errors.New("storage: expecting a non-blank projectID")
+	}
+	if serviceAccountEmail == "" {
+		return nil, errors.New("storage: expecting a non-blank service account email")
+	}
+
+	desc := new(hmacKeyDesc)
+	for _, opt := range opts {
+		opt.withHMACKeyDesc(desc)
+	}
+
+	o := makeStorageOpts(false, c.retry, desc.userProjectID)
+	hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
+	return hk, err
+}
+
+// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
+type HMACKeyAttrsToUpdate struct {
+	// State is required and must be either Active or Inactive.
+	State HMACState
+
+	// Etag is an optional field and it is the HTTP/1.1 Entity tag.
+	Etag string
+}
+
+// Update mutates the HMACKey referred to by accessID.
+func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
+	if au.State != Active && au.State != Inactive {
+		return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
+	}
+
+	desc := new(hmacKeyDesc)
+	for _, opt := range opts {
+		opt.withHMACKeyDesc(desc)
+	}
+
+	isIdempotent := len(au.Etag) > 0
+	o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
+	hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
+	return hk, err
+}
+
+// An HMACKeysIterator is an iterator over HMACKeys.
+//
+// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+type HMACKeysIterator struct {
+	ctx       context.Context
+	raw       *raw.ProjectsHmacKeysService
+	projectID string
+	hmacKeys  []*HMACKey
+	pageInfo  *iterator.PageInfo
+	nextFunc  func() error
+	index     int
+	desc      hmacKeyDesc
+	retry     *retryConfig
+}
+
+// ListHMACKeys returns an iterator for listing HMACKeys.
+//
+// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
+	desc := new(hmacKeyDesc)
+	for _, opt := range opts {
+		opt.withHMACKeyDesc(desc)
+	}
+
+	o := makeStorageOpts(true, c.retry, desc.userProjectID)
+	return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
+}
+
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns iterator.Done, all subsequent
+// calls will return iterator.Done.
+//
+// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+func (it *HMACKeysIterator) Next() (*HMACKey, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+
+	key := it.hmacKeys[it.index]
+	it.index++
+
+	return key, nil
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+//
+// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
+	// TODO: Remove fetch method upon integration. This method is internalized into
+	// httpStorageClient.ListHMACKeys() as it is the only caller.
+ call := it.raw.List(it.projectID) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if it.desc.showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if it.desc.userProjectID != "" { + call = call.UserProject(it.desc.userProjectID) + } + if it.desc.forServiceAccountEmail != "" { + call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail) + } + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + + var resp *raw.HmacKeysMetadata + err = run(it.ctx, func(ctx context.Context) error { + resp, err = call.Context(ctx).Do() + return err + }, it.retry, true) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hk := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := toHMACKeyFromRaw(hk, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil +} + +type hmacKeyDesc struct { + forServiceAccountEmail string + showDeletedKeys bool + userProjectID string +} + +// HMACKeyOption configures the behavior of HMACKey related methods and actions. +type HMACKeyOption interface { + withHMACKeyDesc(*hmacKeyDesc) +} + +type hmacKeyDescFunc func(*hmacKeyDesc) + +func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { + hkdf(hkd) +} + +// ForHMACKeyServiceAccountEmail returns HMAC Keys that are +// associated with the email address of a service account in the project. +// +// Only one service account email can be used as a filter, so if multiple +// of these options are applied, the last email to be set will be used. +func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.forServiceAccountEmail = serviceAccountEmail + }) +} + +// ShowDeletedHMACKeys will also list keys whose state is "DELETED". +func ShowDeletedHMACKeys() HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.showDeletedKeys = true + }) +} + +// UserProjectForHMACKeys will bill the request against userProjectID +// if userProjectID is non-empty. +// +// Note: This is a noop right now and only provided for API compatibility. +func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.userProjectID = userProjectID + }) +} diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go new file mode 100644 index 000000000..0e213a663 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -0,0 +1,1486 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/iam/apiv1/iampb" + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "github.com/googleapis/gax-go/v2/callctx" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" + htransport "google.golang.org/api/transport/http" +) + +// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic +// storageClient interface. +type httpStorageClient struct { + creds *google.Credentials + hc *http.Client + xmlHost string + raw *raw.Service + scheme string + settings *settings + config *storageConfig +} + +// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON +// Storage API. +func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { + s := initSettings(opts...) + o := s.clientOption + config := newStorageConfig(o...) + + var creds *google.Credentials + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + // Prepend default options to avoid overriding options passed by the user. + o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) + + o = append(o, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), + internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + ) + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, o...) + if err == nil { + creds = c + o = append(o, internaloption.WithCredentials(creds)) + } + } else { + var hostURL *url.URL + + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + + // Append the emulator host as default endpoint for the user + o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) 
+ + o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint)) + o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) + } + s.clientOption = o + + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. + hc, ep, err := htransport.NewClient(ctx, s.clientOption...) + if err != nil { + return nil, fmt.Errorf("dialing: %w", err) + } + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) + if err != nil { + return nil, fmt.Errorf("storage client: %w", err) + } + // Update xmlHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + return &httpStorageClient{ + creds: creds, + hc: hc, + xmlHost: u.Host, + raw: rawService, + scheme: u.Scheme, + settings: s, + config: &config, + }, nil +} + +func (c *httpStorageClient) Close() error { + c.hc.CloseIdleConnections() + return nil +} + +// Top-level methods. + +func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.ServiceAccount.Get(project) + var res *raw.ServiceAccount + err := run(ctx, func(ctx context.Context) error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + return res.EmailAddress, nil +} + +func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = bucket + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := c.raw.Buckets.Insert(project, bkt) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + if enableObjectRetention != nil { + req.EnableObjectRetention(*enableObjectRetention) + } + var battrs *BucketAttrs + err := run(ctx, func(ctx context.Context) error { + b, err := req.Context(ctx).Do() + if err != nil { + return err + } + battrs, err = newBucket(b) + return err + }, s.retry, s.idempotent) + return battrs, err +} + +func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { + s := callSettings(c.settings, opts...) 
+ it := &BucketIterator{ + ctx: ctx, + projectID: project, + } + + fetch := func(pageSize int, pageToken string) (token string, err error) { + req := c.raw.Buckets.List(it.projectID) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + + return it +} + +// Bucket methods. + +func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Delete(bucket) + if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) +} + +func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Buckets.Get(bucket).Projection("full") + err := applyBucketConds("httpStorageClient.GetBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + + var resp *raw.Bucket + err = run(ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp) +} +func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { + s := callSettings(c.settings, opts...) + rb := uattrs.toRawBucket() + req := c.raw.Buckets.Patch(bucket, rb).Projection("full") + err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) + if err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if uattrs != nil && uattrs.PredefinedACL != "" { + req.PredefinedAcl(uattrs.PredefinedACL) + } + if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + } + + var rawBucket *raw.Bucket + err = run(ctx, func(ctx context.Context) error { + rawBucket, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return newBucket(rawBucket) +} + +func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) 
+ + var metageneration int64 + if conds != nil { + metageneration = conds.MetagenerationMatch + } + req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) + + return run(ctx, func(ctx context.Context) error { + _, err := req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) +} +func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { + s := callSettings(c.settings, opts...) + it := &ObjectIterator{ + ctx: ctx, + } + if q != nil { + it.query = *q + } + fetch := func(pageSize int, pageToken string) (string, error) { + req := c.raw.Objects.List(bucket) + if it.query.SoftDeleted { + req.SoftDeleted(it.query.SoftDeleted) + } + projection := it.query.Projection + if projection == ProjectionDefault { + projection = ProjectionFull + } + req.Projection(projection.String()) + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.StartOffset(it.query.StartOffset) + req.EndOffset(it.query.EndOffset) + req.Versions(it.query.Versions) + req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) + req.MatchGlob(it.query.MatchGlob) + req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) + if selection := it.query.toFieldSelection(); selection != "" { + req.Fields("nextPageToken", googleapi.Field(selection)) + } + req.PageToken(pageToken) + if s.userProject != "" { + req.UserProject(s.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + + return it +} + +// Object metadata methods. + +func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Delete(bucket, object).Context(ctx) + if err := applyConds("Delete", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + return err +} + +func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
+ req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", params.gen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + if params.softDeleted { + req.SoftDeleted(params.softDeleted) + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { + obj, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + uattrs := params.uattrs + s := callSettings(c.settings, opts...) + + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + if uattrs.Retention != nil { + // For ObjectRetention it's an error to send empty fields. + // Instead we send a null as the user's intention is to remove. 
+ if uattrs.Retention.Mode == "" && uattrs.Retention.RetainUntil.IsZero() { + nullFields = append(nullFields, "Retention") + } else { + attrs.Retention = uattrs.Retention + forceSendFields = append(forceSendFields, "Retention") + } + } + rawObj := attrs.toRawObject(params.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := c.raw.Objects.Patch(params.bucket, params.object, rawObj).Projection("full") + if err := applyConds("Update", params.gen, params.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + + if params.overrideRetention != nil { + call.OverrideUnlockedRetention(*params.overrideRetention) + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent) + var e *googleapi.Error + if errors.As(err, &e) && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx) + // Do not set the generation here since it's not an optional condition; it gets set above. + if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if params.copySourceACL { + req.CopySourceAcl(params.copySourceACL) + } + if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil { + return nil, err + } + + var obj *raw.Object + var err error + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + return newObject(obj), err +} + +// Default Object ACL methods. + +func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) +} + +func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.ObjectAccessControls + var err error + req := c.raw.DefaultObjectAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() + return err + }, s.retry, true) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} +func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) 
+ type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var err error + req := c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) +} + +// Bucket ACL methods. + +func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) +} + +func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) + var acls *raw.BucketAccessControls + var err error + req := c.raw.BucketAccessControls.List(bucket) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() + return err + }, s.retry, true) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + acl := &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + var err error + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) +} + +// configureACLCall sets the context and user project on the apiary library call. +// This will panic if the call does not have the correct methods. +func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) + } +} + +// Object ACL methods. + +func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) +} + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. +func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { + s := callSettings(c.settings, opts...) 
+ var acls *raw.ObjectAccessControls + var err error + req := c.raw.ObjectAccessControls.List(bucket, object) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var err error + req := c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent) +} + +// Media operations. + +func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + rawReq := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) + if req.sendCRC32C { + rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) + } + for _, src := range req.srcs { + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.name, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) + } + + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq) + if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + + var err error + retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { + return nil, err + } + return newObject(obj), nil +} +func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { + s := callSettings(c.settings, opts...) 
+ rawObject := req.dstObject.attrs.toRawObject("") + call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) + + call.Projection("full") + if req.token != "" { + call.RewriteToken(req.token) + } + if req.dstObject.keyName != "" { + call.DestinationKmsKeyName(req.dstObject.keyName) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + // Set destination encryption headers. + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + // Set source encryption headers. + if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { + return nil, err + } + + if req.maxBytesRewrittenPerCall != 0 { + call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) + } + + var res *raw.RewriteResponse + var err error + + retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.Done, + written: res.TotalBytesRewritten, + size: res.ObjectSize, + token: res.RewriteToken, + resource: newObject(res.Resource), + } + + return r, nil +} + +func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + if c.config.useJSONforReads { + return c.newRangeReaderJSON(ctx, params, s) + } + return c.newRangeReaderXML(ctx, params, s) +} + +func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { + u := &url.URL{ + Scheme: c.scheme, + Host: c.xmlHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + RawPath: fmt.Sprintf("/%s/%s", params.bucket, url.PathEscape(params.object)), + } + verb := "GET" + if params.length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + + if s.userProject != "" { + req.Header.Set("X-Goog-User-Project", s.userProject) + } + + if err := setRangeReaderHeaders(req.Header, params); err != nil { + return nil, err + } + + reopen := readerReopen(ctx, req.Header, params, s, + func(ctx context.Context) (*http.Response, error) { + // Set custom headers passed in via the context. This is only required for XML; + // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. 
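The closing comment above notes that, on the XML read path, per-call headers travel in the request context and are copied onto the raw *http.Request by the closure just below. A minimal sketch of how such a header would be attached upstream, assuming gax-go's callctx helpers (the same package whose HeadersFromContext the loop below reads); the header name is purely illustrative:

```
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	// Attach a per-invocation header to the context; HeadersFromContext (read by
	// the XML download closure below) returns it as part of a map[string][]string.
	ctx := callctx.SetHeaders(context.Background(), "x-goog-custom-audit-job", "job-1234")
	fmt.Println(callctx.HeadersFromContext(ctx))
}
```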
+ ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + for _, v := range vals { + req.Header.Set(k, v) + } + } + return c.hc.Do(req.WithContext(ctx)) + }, + func() error { return setConditionsHeaders(req.Header, params.conds) }, + func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) + + res, err := reopen(0) + if err != nil { + return nil, err + } + return parseReadResponse(res, params, reopen) +} + +func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { + call := c.raw.Objects.Get(params.bucket, params.object) + + call.Projection("full") + + if s.userProject != "" { + call.UserProject(s.userProject) + } + + if err := setRangeReaderHeaders(call.Header(), params); err != nil { + return nil, err + } + + reopen := readerReopen(ctx, call.Header(), params, s, func(ctx context.Context) (*http.Response, error) { return call.Context(ctx).Download() }, + func() error { return applyConds("NewReader", params.gen, params.conds, call) }, + func() { call.Generation(params.gen) }) + + res, err := reopen(0) + if err != nil { + return nil, err + } + return parseReadResponse(res, params, reopen) +} + +func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + errorf := params.setError + setObj := params.setObj + progress := params.progress + attrs := params.attrs + + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(params.chunkSize), + } + if c := attrs.ContentType; c != "" || params.forceEmptyContentType { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + if params.chunkRetryDeadline != 0 { + mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline)) + } + + pr, pw := io.Pipe() + + go func() { + defer close(params.donec) + + rawObj := attrs.toRawObject(params.bucket) + if params.sendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if attrs.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5) + } + call := c.raw.Objects.Insert(params.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(params.ctx). + Name(params.attrs.Name) + call.ProgressUpdater(func(n, _ int64) { progress(n) }) + + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", defaultGen, params.conds, call) + if err == nil { + if s.userProject != "" { + call.UserProject(s.userProject) + } + // TODO(tritone): Remove this code when Uploads begin to support + // retry attempt header injection with "client header" injection. + setClientHeader(call.Header()) + + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. + + // Retry only when the operation is idempotent or the retry policy is RetryAlways. 
+ var useRetry bool + if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { + useRetry = true + } else if s.retry != nil && s.retry.policy == RetryAlways { + useRetry = true + } + if useRetry { + if s.retry != nil { + call.WithRetry(s.retry.backoff, s.retry.shouldRetry) + } else { + call.WithRetry(nil, nil) + } + } + resp, err = call.Do() + } + if err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + setObj(newObject(resp)) + }() + + return pw, nil +} + +// IAM methods. + +func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rp *raw.Policy + err := run(ctx, func(ctx context.Context) error { + var err error + rp, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + + rp := iamToStoragePolicy(policy) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + if s.userProject != "" { + call.UserProject(s.userProject) + } + + return run(ctx, func(ctx context.Context) error { + _, err := call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) +} + +func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Buckets.TestIamPermissions(resource, permissions) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.TestIamPermissionsResponse + err := run(ctx, func(ctx context.Context) error { + var err error + res, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// HMAC Key methods. + +func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Get(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func(ctx context.Context) error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) 
+ it := &HMACKeysIterator{ + ctx: ctx, + raw: c.raw.Projects.HmacKeys, + projectID: project, + retry: s.retry, + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + call := c.raw.Projects.HmacKeys.List(project) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + if showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + if serviceAccountEmail != "" { + call = call.ServiceAccountEmail(serviceAccountEmail) + } + + var resp *raw.HmacKeysMetadata + err = run(it.ctx, func(ctx context.Context) error { + resp, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hk := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := toHMACKeyFromRaw(hk, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ + Etag: attrs.Etag, + State: string(attrs.State), + }) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func(ctx context.Context) error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var hk *raw.HmacKey + if err := run(ctx, func(ctx context.Context) error { + h, err := call.Context(ctx).Do() + hk = h + return err + }, s.retry, s.idempotent); err != nil { + return nil, err + } + return toHMACKeyFromRaw(hk, true) +} + +func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Delete(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + return run(ctx, func(ctx context.Context) error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent) +} + +// Notification methods. + +// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. +// +// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, +// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. 
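The pagination note above also shapes the exported surface: callers receive the whole map in a single call. A short usage sketch, assuming the public BucketHandle.Notifications wrapper in this package and an illustrative bucket name:

```
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	// Notifications surfaces the same map-keyed-by-ID shape that
	// ListNotifications below returns from the JSON API.
	notifications, err := client.Bucket("my-bucket").Notifications(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for id, n := range notifications {
		fmt.Printf("notification %s publishes to topic %s\n", id, n.TopicID)
	}
}
```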
+func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.List(bucket) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.Notifications + err = run(ctx, func(ctx context.Context) error { + res, err = call.Context(ctx).Do() + return err + }, s.retry, true) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rn *raw.Notification + err = run(ctx, func(ctx context.Context) error { + rn, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Delete(bucket, id) + if s.userProject != "" { + call.UserProject(s.userProject) + } + return run(ctx, func(ctx context.Context) error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent) +} + +type httpReader struct { + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +func (r *httpReader) Read(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + } + if err == nil { + return n, nil + } + if err == io.EOF { + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if r.checkCRC { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + return n, err + } + // Read failed (likely due to connection issues), but we will try to reopen + // the pipe and continue. Send a ranged read request that takes into account + // the number of bytes we've already seen. 
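Concretely, the ranged re-read described in the comment above is produced by readerReopen (defined later in this hunk): with offset 100, length 50, and 10 bytes already seen it requests bytes=110-149; with a negative length it requests bytes=110-; a negative offset combined with a negative length becomes a suffix request such as bytes=-100. A standalone sketch of that computation, with names local to this example:

```
package main

import "fmt"

// rangeHeader mirrors the Range-header branches in readerReopen below.
func rangeHeader(offset, length, seen int64) string {
	start := offset + seen
	switch {
	case length < 0 && start < 0:
		return fmt.Sprintf("bytes=%d", start) // suffix range, e.g. bytes=-100
	case length < 0 && start > 0:
		return fmt.Sprintf("bytes=%d-", start) // open-ended range
	case length > 0:
		// The end byte ignores how many bytes have already been seen.
		return fmt.Sprintf("bytes=%d-%d", start, offset+length-1)
	}
	return "" // no Range header (whole object, or length == 0)
}

func main() {
	fmt.Println(rangeHeader(100, 50, 10)) // bytes=110-149
	fmt.Println(rangeHeader(100, -1, 10)) // bytes=110-
	fmt.Println(rangeHeader(-100, -1, 0)) // bytes=-100
}
```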
+ res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func (r *httpReader) Close() error { + return r.body.Close() +} + +func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { + if params.readCompressed { + h.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(h, params.encryptionKey, false); err != nil { + return err + } + return nil +} + +// readerReopen initiates a Read with offset and length, assuming we +// have already read seen bytes. +func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, + doDownload func(context.Context) (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { + return func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. + if err := applyConditions(); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. + if params.gen >= 0 { + setGeneration() + } + + var err error + var res *http.Response + err = run(ctx, func(ctx context.Context) error { + res, err = doDownload(ctx) + if err != nil { + var e *googleapi.Error + if errors.As(err, &e) { + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err + } + + if res.StatusCode == http.StatusNotFound { + // this check is necessary only for XML + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. 
+ if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
+ gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
+ if err != nil {
+ return err
+ }
+ params.gen = gen64
+ }
+ return nil
+ }, s.retry, s.idempotent)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (*Reader, error) {
+ var err error
+ var (
+ size int64 // total size of object, even if a range was requested.
+ checkCRC bool
+ crc uint32
+ startOffset int64 // non-zero if range request.
+ )
+ if res.StatusCode == http.StatusPartialContent {
+ cr := strings.TrimSpace(res.Header.Get("Content-Range"))
+ if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
+ return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+ }
+ // Content range is formatted <first byte>-<last byte>/<total size>. We take
+ // the total size.
+ size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+ }
+
+ dashIndex := strings.Index(cr, "-")
+ if dashIndex >= 0 {
+ startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
+ }
+ }
+ } else {
+ size = res.ContentLength
+ // Check the CRC iff all of the following hold:
+ // - We asked for content (length != 0).
+ // - We got all the content (status != PartialContent).
+ // - The server sent a CRC header.
+ // - The Go http stack did not uncompress the file.
+ // - We were not served compressed data that was uncompressed on download.
+ // The problem with the last two cases is that the CRC will not match -- GCS
+ // computes it on the compressed contents, but we compute it on the
+ // uncompressed contents.
+ if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
+ crc, checkCRC = parseCRC32c(res)
+ }
+ }
+
+ remain := res.ContentLength
+ body := res.Body
+ // If the user requested zero bytes, explicitly close and remove the request
+ // body.
+ if params.length == 0 {
+ remain = 0
+ body.Close()
+ body = emptyBody
+ }
+ var metaGen int64
+ if res.Header.Get("X-Goog-Metageneration") != "" {
+ metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var lm time.Time
+ if res.Header.Get("Last-Modified") != "" {
+ lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ attrs := ReaderObjectAttrs{
+ Size: size,
+ ContentType: res.Header.Get("Content-Type"),
+ ContentEncoding: res.Header.Get("Content-Encoding"),
+ CacheControl: res.Header.Get("Cache-Control"),
+ LastModified: lm,
+ StartOffset: startOffset,
+ Generation: params.gen,
+ Metageneration: metaGen,
+ }
+ return &Reader{
+ Attrs: attrs,
+ size: size,
+ remain: remain,
+ checkCRC: checkCRC,
+ reader: &httpReader{
+ reopen: reopen,
+ body: body,
+ wantCRC: crc,
+ checkCRC: checkCRC,
+ },
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go
new file mode 100644
index 000000000..4c01bff4f
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/iam.go
@@ -0,0 +1,133 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/iam/apiv1/iampb" + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" + "google.golang.org/genproto/googleapis/type/expr" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + userProject: b.userProject, + retry: b.retry, + client: b.c, + }, b.name) +} + +// iamClient implements the iam.client interface. +type iamClient struct { + userProject string + retry *retryConfig + client *Client +} + +func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + return c.GetWithVersion(ctx, resource, 1) +} + +func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + + isIdempotent := len(p.Etag) > 0 + o := makeStorageOpts(isIdempotent, c.retry, c.userProject) + return c.client.tc.SetIamPolicy(ctx, resource, p, o...) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) 
+} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + Version: int64(ip.Version), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + Condition: iamToStorageCondition(ib.Condition), + }) + } + return rbs +} + +func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { + if exprpb == nil { + return nil + } + return &raw.Expr{ + Expression: exprpb.Expression, + Description: exprpb.Description, + Location: exprpb.Location, + Title: exprpb.Title, + } +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + Condition: iamFromStorageCondition(rb.Condition), + }) + } + return ibs +} + +func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { + if rawexpr == nil { + return nil + } + return &expr.Expr{ + Expression: rawexpr.Expression, + Description: rawexpr.Description, + Location: rawexpr.Location, + Title: rawexpr.Title, + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go new file mode 100644 index 000000000..415b2b585 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -0,0 +1,210 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + "google.golang.org/api/iterator" +) + +// BucketIterator manages a stream of *storagepb.Bucket. +type BucketIterator struct { + items []*storagepb.Bucket + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *BucketIterator) Next() (*storagepb.Bucket, error) { + var item *storagepb.Bucket + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *BucketIterator) bufLen() int { + return len(it.items) +} + +func (it *BucketIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. +type HmacKeyMetadataIterator struct { + items []*storagepb.HmacKeyMetadata + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { + var item *storagepb.HmacKeyMetadata + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *HmacKeyMetadataIterator) bufLen() int { + return len(it.items) +} + +func (it *HmacKeyMetadataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. +type NotificationConfigIterator struct { + items []*storagepb.NotificationConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
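The Next and iterator.Done contract described above follows the standard google.golang.org/api/iterator pattern. A consumption sketch against the generated pager; this package is internal to cloud.google.com/go/storage, so the import and the request field names (taken from the google.storage.v2 protos) are shown for illustration only:

```
package main

import (
	"context"
	"fmt"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	c, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()

	it := c.ListBuckets(ctx, &storagepb.ListBucketsRequest{Parent: "projects/my-project"})
	for {
		bucket, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(bucket.GetName())
	}
}
```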
+func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { + var item *storagepb.NotificationConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ObjectIterator manages a stream of *storagepb.Object. +type ObjectIterator struct { + items []*storagepb.Object + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ObjectIterator) Next() (*storagepb.Object, error) { + var item *storagepb.Object + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) bufLen() int { + return len(it.items) +} + +func (it *ObjectIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go new file mode 100644 index 000000000..5e2a8f0ad --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -0,0 +1,152 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package storage is an auto-generated package for the +// Cloud Storage API. +// +// Stop. This folder is likely not what you are looking for. This folder +// contains protocol buffer definitions for an API only accessible to select +// customers. Customers not participating should not depend on this file. +// Please contact Google Cloud sales if you are interested. Unless told +// otherwise by a Google Cloud representative, do not use or otherwise rely +// on any of the contents of this folder. 
If you would like to use Cloud +// Storage, please consult our official documentation (at +// https://cloud.google.com/storage/docs/apis) for details on our XML and +// JSON APIs, or else consider one of our client libraries (at +// https://cloud.google.com/storage/docs/reference/libraries). This API +// defined in this folder is unreleased and may shut off, break, or fail at +// any time for any users who are not registered as a part of a private +// preview program. +// +// # General documentation +// +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// stream, err := c.BidiWriteObject(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// go func() { +// reqs := []*storagepb.BidiWriteObjectRequest{ +// // TODO: Create requests. +// } +// for _, req := range reqs { +// if err := stream.Send(req); err != nil { +// // TODO: Handle error. +// } +// } +// stream.CloseSend() +// }() +// for { +// resp, err := stream.Recv() +// if err == io.EOF { +// break +// } +// if err != nil { +// // TODO: handle error. +// } +// // TODO: Use resp. +// _ = resp +// } +// +// # Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. 
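Since deadlines are per call rather than per client, a bounded request looks like the sketch below; it continues the snippet style of the examples above, and the GetBucketRequest field name and bucket path are assumptions drawn from the google.storage.v2 protos:

```
ctx := context.Background()
c, err := storage.NewClient(ctx)
if err != nil {
	// TODO: Handle error.
}
defer c.Close()

// The deadline bounds only this GetBucket call, not the client or its connection.
callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
b, err := c.GetBucket(callCtx, &storagepb.GetBucketRequest{
	Name: "projects/_/buckets/my-bucket",
})
if err != nil {
	// TODO: Handle error.
}
_ = b
```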
+// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors +package storage // import "cloud.google.com/go/storage/internal/apiv2" + +import ( + "context" + + "google.golang.org/api/option" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json new file mode 100644 index 000000000..56256bb2c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -0,0 +1,178 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.storage.v2", + "libraryPackage": "cloud.google.com/go/storage/internal/apiv2", + "services": { + "Storage": { + "clients": { + "grpc": { + "libraryClient": "Client", + "rpcs": { + "BidiWriteObject": { + "methods": [ + "BidiWriteObject" + ] + }, + "CancelResumableWrite": { + "methods": [ + "CancelResumableWrite" + ] + }, + "ComposeObject": { + "methods": [ + "ComposeObject" + ] + }, + "CreateBucket": { + "methods": [ + "CreateBucket" + ] + }, + "CreateHmacKey": { + "methods": [ + "CreateHmacKey" + ] + }, + "CreateNotificationConfig": { + "methods": [ + "CreateNotificationConfig" + ] + }, + "DeleteBucket": { + "methods": [ + "DeleteBucket" + ] + }, + "DeleteHmacKey": { + "methods": [ + "DeleteHmacKey" + ] + }, + "DeleteNotificationConfig": { + "methods": [ + "DeleteNotificationConfig" + ] + }, + "DeleteObject": { + "methods": [ + "DeleteObject" + ] + }, + "GetBucket": { + "methods": [ + "GetBucket" + ] + }, + "GetHmacKey": { + "methods": [ + "GetHmacKey" + ] + }, + "GetIamPolicy": { + "methods": [ + "GetIamPolicy" + ] + }, + "GetNotificationConfig": { + "methods": [ + "GetNotificationConfig" + ] + }, + "GetObject": { + "methods": [ + "GetObject" + ] + }, + "GetServiceAccount": { + "methods": [ + "GetServiceAccount" + ] + }, + "ListBuckets": { + "methods": [ + "ListBuckets" + ] + }, + "ListHmacKeys": { + "methods": [ + "ListHmacKeys" + ] + }, + "ListNotificationConfigs": { + "methods": [ + "ListNotificationConfigs" + ] + }, + "ListObjects": { + "methods": [ + "ListObjects" + ] + }, + "LockBucketRetentionPolicy": { + "methods": [ + "LockBucketRetentionPolicy" + ] + 
}, + "QueryWriteStatus": { + "methods": [ + "QueryWriteStatus" + ] + }, + "ReadObject": { + "methods": [ + "ReadObject" + ] + }, + "RestoreObject": { + "methods": [ + "RestoreObject" + ] + }, + "RewriteObject": { + "methods": [ + "RewriteObject" + ] + }, + "SetIamPolicy": { + "methods": [ + "SetIamPolicy" + ] + }, + "StartResumableWrite": { + "methods": [ + "StartResumableWrite" + ] + }, + "TestIamPermissions": { + "methods": [ + "TestIamPermissions" + ] + }, + "UpdateBucket": { + "methods": [ + "UpdateBucket" + ] + }, + "UpdateHmacKey": { + "methods": [ + "UpdateHmacKey" + ] + }, + "UpdateObject": { + "methods": [ + "UpdateObject" + ] + }, + "WriteObject": { + "methods": [ + "WriteObject" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go new file mode 100644 index 000000000..82ec5db90 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -0,0 +1,1919 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + "context" + "fmt" + "math" + "net/url" + "regexp" + "strings" + "time" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" +) + +var newClientHook clientHook + +// CallOptions contains the retry settings for each method of Client. 
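Every per-method entry in defaultCallOptions below shares one shape: a 60 s overall timeout plus a gax retryer that retries DEADLINE_EXCEEDED and UNAVAILABLE with exponential backoff starting at 1 s, doubling, and capped at 60 s. A standalone sketch of an equivalent retryer, written only to make that shape explicit:

```
package main

import (
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

// defaultStorageRetryer mirrors the retry settings repeated throughout
// defaultCallOptions below.
func defaultStorageRetryer() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    1000 * time.Millisecond,
		Max:        60000 * time.Millisecond,
		Multiplier: 2.00,
	})
}

func main() {
	_ = defaultStorageRetryer()
}
```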
+type CallOptions struct { + DeleteBucket []gax.CallOption + GetBucket []gax.CallOption + CreateBucket []gax.CallOption + ListBuckets []gax.CallOption + LockBucketRetentionPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + UpdateBucket []gax.CallOption + DeleteNotificationConfig []gax.CallOption + GetNotificationConfig []gax.CallOption + CreateNotificationConfig []gax.CallOption + ListNotificationConfigs []gax.CallOption + ComposeObject []gax.CallOption + DeleteObject []gax.CallOption + RestoreObject []gax.CallOption + CancelResumableWrite []gax.CallOption + GetObject []gax.CallOption + ReadObject []gax.CallOption + UpdateObject []gax.CallOption + WriteObject []gax.CallOption + BidiWriteObject []gax.CallOption + ListObjects []gax.CallOption + RewriteObject []gax.CallOption + StartResumableWrite []gax.CallOption + QueryWriteStatus []gax.CallOption + GetServiceAccount []gax.CallOption + CreateHmacKey []gax.CallOption + DeleteHmacKey []gax.CallOption + GetHmacKey []gax.CallOption + ListHmacKeys []gax.CallOption + UpdateHmacKey []gax.CallOption +} + +func defaultGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("storage.UNIVERSE_DOMAIN:443"), + internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + internaloption.WithDefaultAudience("https://storage.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultCallOptions() *CallOptions { + return &CallOptions{ + DeleteBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListBuckets: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + LockBucketRetentionPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + 
gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + SetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + TestIamPermissions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListNotificationConfigs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ComposeObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RestoreObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CancelResumableWrite: []gax.CallOption{ + 
gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ReadObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + WriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + BidiWriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListObjects: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RewriteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + StartResumableWrite: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + QueryWriteStatus: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetServiceAccount: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + 
codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListHmacKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + } +} + +// internalClient is an interface that defines the methods available from Cloud Storage API. +type internalClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + DeleteBucket(context.Context, *storagepb.DeleteBucketRequest, ...gax.CallOption) error + GetBucket(context.Context, *storagepb.GetBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + CreateBucket(context.Context, *storagepb.CreateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + ListBuckets(context.Context, *storagepb.ListBucketsRequest, ...gax.CallOption) *BucketIterator + LockBucketRetentionPolicy(context.Context, *storagepb.LockBucketRetentionPolicyRequest, ...gax.CallOption) (*storagepb.Bucket, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) + UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) + DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error + GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator + ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, 
...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) + GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) + UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) + WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) + ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator + RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) + StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) + QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) + GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) + CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) + DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error + GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) + ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator + UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) +} + +// Client is a client for interacting with Cloud Storage API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). +// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). +type Client struct { + // The internal transport-dependent client. + internalClient internalClient + + // The call options for this service. + CallOptions *CallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
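The defaults above give every RPC a 60 second timeout and retries on DeadlineExceeded/Unavailable with exponential backoff, and each wrapper method also accepts gax.CallOption values that can override those settings for a single call. A minimal sketch of such an override, assuming a package that can import this module's internal apiv2 path (the bucket name and durations are placeholders):

```
package storageexample

import (
	"context"
	"time"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

// getBucketWithTightRetry overrides the default timeout and retry policy for
// a single GetBucket call; the per-call options are appended after the
// defaults, so they win.
func getBucketWithTightRetry(ctx context.Context, c *storage.Client) (*storagepb.Bucket, error) {
	return c.GetBucket(ctx,
		&storagepb.GetBucketRequest{Name: "projects/_/buckets/my-bucket"},
		gax.WithTimeout(10*time.Second),
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    250 * time.Millisecond,
				Max:        5 * time.Second,
				Multiplier: 1.5,
			})
		}),
	)
}
```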
+func (c *Client) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *Client) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// DeleteBucket permanently deletes an empty bucket. +func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteBucket(ctx, req, opts...) +} + +// GetBucket returns metadata for the specified bucket. +func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.GetBucket(ctx, req, opts...) +} + +// CreateBucket creates a new bucket. +func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.CreateBucket(ctx, req, opts...) +} + +// ListBuckets retrieves a list of buckets for a given project. +func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + return c.internalClient.ListBuckets(ctx, req, opts...) +} + +// LockBucketRetentionPolicy locks retention policy on a bucket. +func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) +} + +// GetIamPolicy gets the IAM policy for a specified bucket. +// The resource field in the request should be +// projects/_/buckets/{bucket}. +func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.GetIamPolicy(ctx, req, opts...) +} + +// SetIamPolicy updates an IAM policy for the specified bucket. +// The resource field in the request should be +// projects/_/buckets/{bucket}. +func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + return c.internalClient.SetIamPolicy(ctx, req, opts...) +} + +// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if +// any, are held by the caller. +// The resource field in the request should be +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. +func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + return c.internalClient.TestIamPermissions(ctx, req, opts...) +} + +// UpdateBucket updates a bucket. Equivalent to JSON API’s storage.buckets.patch method. +func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + return c.internalClient.UpdateBucket(ctx, req, opts...) +} + +// DeleteNotificationConfig permanently deletes a NotificationConfig. 
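As the GetIamPolicy/SetIamPolicy/TestIamPermissions comments above note, the IAM wrappers take the bucket in projects/_/buckets/{bucket} form rather than a bare bucket name. A minimal sketch, with the bucket name and permission string as placeholders:

```
package storageexample

import (
	"context"

	"cloud.google.com/go/iam/apiv1/iampb"
	storage "cloud.google.com/go/storage/internal/apiv2"
)

// bucketIAM reads the bucket's IAM policy and tests one permission, using the
// projects/_/buckets/{bucket} resource form described above.
func bucketIAM(ctx context.Context, c *storage.Client) error {
	const resource = "projects/_/buckets/my-bucket"
	if _, err := c.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resource}); err != nil {
		return err
	}
	_, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    resource,
		Permissions: []string{"storage.objects.get"},
	})
	return err
}
```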
+func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) +} + +// GetNotificationConfig view a NotificationConfig. +func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.GetNotificationConfig(ctx, req, opts...) +} + +// CreateNotificationConfig creates a NotificationConfig for a given bucket. +// These NotificationConfigs, when triggered, publish messages to the +// specified Pub/Sub topics. See +// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). +func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.CreateNotificationConfig(ctx, req, opts...) +} + +// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. +func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { + return c.internalClient.ListNotificationConfigs(ctx, req, opts...) +} + +// ComposeObject concatenates a list of existing objects into a new object in the same +// bucket. +func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.ComposeObject(ctx, req, opts...) +} + +// DeleteObject deletes an object and its metadata. +// +// Deletions are normally permanent when versioning is disabled or whenever +// the generation parameter is used. However, if soft delete is enabled for +// the bucket, deleted objects can be restored using RestoreObject until the +// soft delete retention period has passed. +func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteObject(ctx, req, opts...) +} + +// RestoreObject restores a soft-deleted object. +func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.RestoreObject(ctx, req, opts...) +} + +// CancelResumableWrite cancels an in-progress resumable upload. +// +// Any attempts to write to the resumable upload after cancelling the upload +// will fail. +// +// The behavior for currently in progress write operations is not guaranteed - +// they could either complete before the cancellation or fail if the +// cancellation completes first. +func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + return c.internalClient.CancelResumableWrite(ctx, req, opts...) +} + +// GetObject retrieves an object’s metadata. +func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.GetObject(ctx, req, opts...) +} + +// ReadObject reads an object’s data. 
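ReadObject is a server-streaming RPC, so a caller drains the returned stream rather than receiving the object in one message. A minimal sketch of that loop; the bucket and object names are placeholders:

```
package storageexample

import (
	"bytes"
	"context"
	"errors"
	"io"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// readAll streams an object's content chunk by chunk until the server
// signals end of stream.
func readAll(ctx context.Context, c *storage.Client) ([]byte, error) {
	stream, err := c.ReadObject(ctx, &storagepb.ReadObjectRequest{
		Bucket: "projects/_/buckets/my-bucket",
		Object: "my-object",
	})
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}
		buf.Write(resp.GetChecksummedData().GetContent())
	}
	return buf.Bytes(), nil
}
```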
+func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { + return c.internalClient.ReadObject(ctx, req, opts...) +} + +// UpdateObject updates an object’s metadata. +// Equivalent to JSON API’s storage.objects.patch. +func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.UpdateObject(ctx, req, opts...) +} + +// WriteObject stores a new object and metadata. +// +// An object can be written either in a single message stream or in a +// resumable sequence of message streams. To write using a single stream, +// the client should include in the first message of the stream an +// WriteObjectSpec describing the destination bucket, object, and any +// preconditions. Additionally, the final message must set ‘finish_write’ to +// true, or else it is an error. +// +// For a resumable write, the client should instead call +// StartResumableWrite(), populating a WriteObjectSpec into that request. +// They should then attach the returned upload_id to the first message of +// each following call to WriteObject. If the stream is closed before +// finishing the upload (either explicitly by the client or due to a network +// error or an error response from the server), the client should do as +// follows: +// +// Check the result Status of the stream, to determine if writing can be +// resumed on this stream or must be restarted from scratch (by calling +// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, +// INTERNAL, and UNAVAILABLE. For each case, the client should use binary +// exponential backoff before retrying. Additionally, writes can be +// resumed after RESOURCE_EXHAUSTED errors, but only after taking +// appropriate measures, which may include reducing aggregate send rate +// across clients and/or requesting a quota increase for your project. +// +// If the call to WriteObject returns ABORTED, that indicates +// concurrent attempts to update the resumable write, caused either by +// multiple racing clients or by a single client where the previous +// request was timed out on the client side but nonetheless reached the +// server. In this case the client should take steps to prevent further +// concurrent writes (e.g., increase the timeouts, stop using more than +// one process to perform the upload, etc.), and then should follow the +// steps below for resuming the upload. +// +// For resumable errors, the client should call QueryWriteStatus() and +// then continue writing from the returned persisted_size. This may be +// less than the amount of data the client previously sent. Note also that +// it is acceptable to send data starting at an offset earlier than the +// returned persisted_size; in this case, the service will skip data at +// offsets that were already persisted (without checking that it matches +// the previously written data), and write only the data starting from the +// persisted offset. Even though the data isn’t written, it may still +// incur a performance cost over resuming at the correct write offset. +// This behavior can make client-side handling simpler in some cases. +// +// Clients must only send data that is a multiple of 256 KiB per message, +// unless the object is being finished with finish_write set to true. 
+// +// The service will not view the object as complete until the client has +// sent a WriteObjectRequest with finish_write set to true. Sending any +// requests on a stream after sending a request with finish_write set to +// true will cause an error. The client should check the response it +// receives to determine how much data the service was able to commit and +// whether the service views the object as complete. +// +// Attempting to resume an already finalized object will result in an OK +// status, with a WriteObjectResponse containing the finalized object’s +// metadata. +// +// Alternatively, the BidiWriteObject operation may be used to write an +// object with controls over flushing and the ability to fetch the ability to +// determine the current persisted size. +func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { + return c.internalClient.WriteObject(ctx, opts...) +} + +// BidiWriteObject stores a new object and metadata. +// +// This is similar to the WriteObject call with the added support for +// manual flushing of persisted state, and the ability to determine current +// persisted size without closing the stream. +// +// The client may specify one or both of the state_lookup and flush fields +// in each BidiWriteObjectRequest. If flush is specified, the data written +// so far will be persisted to storage. If state_lookup is specified, the +// service will respond with a BidiWriteObjectResponse that contains the +// persisted size. If both flush and state_lookup are specified, the flush +// will always occur before a state_lookup, so that both may be set in the +// same request and the returned state will be the state of the object +// post-flush. When the stream is closed, a BidiWriteObjectResponse will +// always be sent to the client, regardless of the value of state_lookup. +func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + return c.internalClient.BidiWriteObject(ctx, opts...) +} + +// ListObjects retrieves a list of objects matching the criteria. +func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { + return c.internalClient.ListObjects(ctx, req, opts...) +} + +// RewriteObject rewrites a source object to a destination object. Optionally overrides +// metadata. +func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { + return c.internalClient.RewriteObject(ctx, req, opts...) +} + +// StartResumableWrite starts a resumable write. How long the write operation remains valid, and +// what happens when the write operation becomes invalid, are +// service-dependent. +func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { + return c.internalClient.StartResumableWrite(ctx, req, opts...) +} + +// QueryWriteStatus determines the persisted_size for an object that is being written, which +// can then be used as the write_offset for the next Write() call. +// +// If the object does not exist (i.e., the object has been deleted, or the +// first Write() has not yet reached the service), this method returns the +// error NOT_FOUND. 
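Putting the protocol described above together: a resumable upload begins with StartResumableWrite, streams WriteObjectRequest messages tagged with the returned upload ID, and consults QueryWriteStatus after a broken stream to learn the persisted size before resuming. A compressed single-chunk sketch with placeholder names:

```
package storageexample

import (
	"context"
	"io"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// resumableUpload writes data as a single finishing chunk of a resumable
// write and returns the size the service reports for it.
func resumableUpload(ctx context.Context, c *storage.Client, data []byte) (int64, error) {
	start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
		WriteObjectSpec: &storagepb.WriteObjectSpec{
			Resource: &storagepb.Object{
				Bucket: "projects/_/buckets/my-bucket",
				Name:   "my-object",
			},
		},
	})
	if err != nil {
		return 0, err
	}
	w, err := c.WriteObject(ctx)
	if err != nil {
		return 0, err
	}
	if err := w.Send(&storagepb.WriteObjectRequest{
		FirstMessage: &storagepb.WriteObjectRequest_UploadId{UploadId: start.GetUploadId()},
		WriteOffset:  0,
		Data: &storagepb.WriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: data},
		},
		FinishWrite: true,
	}); err != nil && err != io.EOF {
		return 0, err
	}
	resp, err := w.CloseAndRecv()
	if err != nil {
		// The stream broke: ask the service how much it persisted, so a
		// retry can resume from that offset as described above.
		st, qerr := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{UploadId: start.GetUploadId()})
		if qerr != nil {
			return 0, qerr
		}
		return st.GetPersistedSize(), err
	}
	return resp.GetResource().GetSize(), nil
}
```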
+// +// The client may call QueryWriteStatus() at any time to determine how +// much data has been processed for this object. This is useful if the +// client is buffering data and needs to know which data can be safely +// evicted. For any sequence of QueryWriteStatus() calls for a given +// object name, the sequence of returned persisted_size values will be +// non-decreasing. +func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { + return c.internalClient.QueryWriteStatus(ctx, req, opts...) +} + +// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. +func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { + return c.internalClient.GetServiceAccount(ctx, req, opts...) +} + +// CreateHmacKey creates a new HMAC key for the given service account. +func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { + return c.internalClient.CreateHmacKey(ctx, req, opts...) +} + +// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. +func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteHmacKey(ctx, req, opts...) +} + +// GetHmacKey gets an existing HMAC key metadata for the given id. +func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.GetHmacKey(ctx, req, opts...) +} + +// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. +func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { + return c.internalClient.ListHmacKeys(ctx, req, opts...) +} + +// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. +func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + return c.internalClient.UpdateHmacKey(ctx, req, opts...) +} + +// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type gRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // Points back to the CallOptions field of the containing Client + CallOptions **CallOptions + + // The gRPC API client. + client storagepb.StorageClient + + // The x-goog-* metadata to be sent with each request. + xGoogHeaders []string +} + +// NewClient creates a new storage client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through +// the abstractions of buckets and objects. For a description of these +// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). 
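The HMAC key wrappers above key their requests by project plus service account (create) or project plus access ID (get, list, delete). A minimal create-then-get sketch; the project and service account are placeholders:

```
package storageexample

import (
	"context"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// createAndFetchHmacKey creates an HMAC key for a service account and reads
// its metadata back by access ID.
func createAndFetchHmacKey(ctx context.Context, c *storage.Client) (*storagepb.HmacKeyMetadata, error) {
	created, err := c.CreateHmacKey(ctx, &storagepb.CreateHmacKeyRequest{
		Project:             "projects/my-project",
		ServiceAccountEmail: "sa@my-project.iam.gserviceaccount.com",
	})
	if err != nil {
		return nil, err
	}
	return c.GetHmacKey(ctx, &storagepb.GetHmacKeyRequest{
		Project:  "projects/my-project",
		AccessId: created.GetMetadata().GetAccessId(),
	})
}
```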
+// +// Resources are named as follows: +// +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. +// +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. +// +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: +// +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + clientOpts := defaultGRPCClientOptions() + if newClientHook != nil { + hookOpts, err := newClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := Client{CallOptions: defaultCallOptions()} + + c := &gRPCClient{ + connPool: connPool, + client: storagepb.NewStorageClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *gRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *gRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...) + return err + }, opts...) 
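NewClient above assembles the default call options and a pooled gRPC connection; the caller owns the returned client and should Close it when finished. A minimal sketch (most applications would go through the public cloud.google.com/go/storage package rather than this internal transport, so the import path here is illustrative):

```
package storageexample

import (
	"context"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// projectServiceAccount dials the gRPC transport, fetches the project's
// storage service account, and closes the connection pool when done.
func projectServiceAccount(ctx context.Context) (*storagepb.ServiceAccount, error) {
	c, err := storage.NewClient(ctx)
	if err != nil {
		return nil, err
	}
	defer c.Close()
	return c.GetServiceAccount(ctx, &storagepb.GetServiceAccountRequest{
		Project: "projects/my-project",
	})
}
```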
+ return err +} + +func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) 
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) + it := &BucketIterator{} + req = proto.Clone(req).(*storagepb.ListBucketsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Bucket, string, error) { + resp := &storagepb.ListBucketsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetBuckets(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
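ListBuckets above wraps the paged RPC in a BucketIterator built on iterator.NewPageInfo, so callers loop on Next until iterator.Done. A minimal sketch with a placeholder project:

```
package storageexample

import (
	"context"
	"fmt"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/api/iterator"
)

// printBuckets walks every page of ListBuckets results for one project.
func printBuckets(ctx context.Context, c *storage.Client) error {
	it := c.ListBuckets(ctx, &storagepb.ListBucketsRequest{Parent: "projects/my-project"})
	for {
		b, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(b.GetName())
	}
}
```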
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) 
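Each method above builds an x-goog-request-params header by matching a request field against a regexp, query-escaping the captured routing value, and joining key=value pairs; for TestIamPermissions the bucket comes either from the bucket resource itself or from the bucket prefix of an object resource. A small standalone sketch of that computation with a placeholder resource name:

```
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// requestParams mirrors the routing-header construction above: capture the
// bucket portion of an IAM resource name and encode it as a key=value pair.
func requestParams(resource string) string {
	bucketRE := regexp.MustCompile("(projects/[^/]+/buckets/[^/]+)(?:/.*)?")
	m := bucketRE.FindStringSubmatch(resource)
	if m == nil {
		return ""
	}
	return "bucket=" + url.QueryEscape(m[1])
}

func main() {
	// Prints: bucket=projects%2F_%2Fbuckets%2Fmy-bucket
	fmt.Println(requestParams("projects/_/buckets/my-bucket/objects/my-object"))
}
```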
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) + var resp *storagepb.Bucket + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
+ opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) + it := &NotificationConfigIterator{} + req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { + resp := &storagepb.ListNotificationConfigsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} + +func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RestoreObject[0:len((*c.CallOptions).RestoreObject):len((*c.CallOptions).RestoreObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) + var resp *storagepb.CancelResumableWriteResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
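DeleteObject and RestoreObject above pair up on buckets with soft delete enabled: the deleted generation can be brought back by passing the same generation to RestoreObject. A minimal sketch, assuming soft delete is enabled and using placeholder names:

```
package storageexample

import (
	"context"

	storage "cloud.google.com/go/storage/internal/apiv2"
	"cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// deleteThenRestore removes one generation of an object and, assuming the
// bucket has soft delete enabled, restores that same generation.
func deleteThenRestore(ctx context.Context, c *storage.Client, generation int64) (*storagepb.Object, error) {
	const (
		bucket = "projects/_/buckets/my-bucket"
		object = "my-object"
	)
	if err := c.DeleteObject(ctx, &storagepb.DeleteObjectRequest{
		Bucket:     bucket,
		Object:     object,
		Generation: generation,
	}); err != nil {
		return nil, err
	}
	return c.RestoreObject(ctx, &storagepb.RestoreObjectRequest{
		Bucket:     bucket,
		Object:     object,
		Generation: generation,
	})
}
```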
+ opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) + var resp storagepb.Storage_ReadObjectClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + var resp storagepb.Storage_WriteObjectClient + opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.WriteObject(ctx, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + var resp storagepb.Storage_BidiWriteObjectClient + opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) + it := &ObjectIterator{} + req = proto.Clone(req).(*storagepb.ListObjectsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Object, string, error) { + resp := &storagepb.ListObjectsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetObjects(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 {
+		routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])
+	}
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
+	var resp *storagepb.RewriteResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
+	var resp *storagepb.StartResumableWriteResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
+		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
+	var resp *storagepb.QueryWriteStatusResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
+	var resp *storagepb.ServiceAccount
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) + var resp *storagepb.CreateHmacKeyResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) + var resp *storagepb.HmacKeyMetadata + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) + return err + }, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...)
+	it := &HmacKeyMetadataIterator{}
+	req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) {
+		resp := &storagepb.ListHmacKeysResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetHmacKeys(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
+	routingHeaders := ""
+	routingHeadersMap := make(map[string]string)
+	if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
+		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
+	}
+	for headerName, headerValue := range routingHeadersMap {
+		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+	}
+	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+	hds := []string{"x-goog-request-params", routingHeaders}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
+	var resp *storagepb.HmacKeyMetadata
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go new file mode 100644 index 000000000..aeb7512f4 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -0,0 +1,11796 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v4.25.3 +// source: google/storage/v2/storage.proto + +package storagepb + +import ( + context "context" + reflect "reflect" + sync "sync" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + _ "google.golang.org/genproto/googleapis/api/annotations" + date "google.golang.org/genproto/googleapis/type/date" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A collection of constant values meaningful to the Storage API. +type ServiceConstants_Values int32 + +const ( + // Unused. Proto3 requires first enum to be 0. + ServiceConstants_VALUES_UNSPECIFIED ServiceConstants_Values = 0 + // The maximum size chunk that can will be returned in a single + // ReadRequest. + // 2 MiB. + ServiceConstants_MAX_READ_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size chunk that can be sent in a single WriteObjectRequest. + // 2 MiB. + ServiceConstants_MAX_WRITE_CHUNK_BYTES ServiceConstants_Values = 2097152 + // The maximum size of an object in MB - whether written in a single stream + // or composed from multiple other objects. + // 5 TiB. + ServiceConstants_MAX_OBJECT_SIZE_MB ServiceConstants_Values = 5242880 + // The maximum length field name that can be sent in a single + // custom metadata field. + // 1 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_NAME_BYTES ServiceConstants_Values = 1024 + // The maximum length field value that can be sent in a single + // custom_metadata field. + // 4 KiB. + ServiceConstants_MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES ServiceConstants_Values = 4096 + // The maximum total bytes that can be populated into all field names and + // values of the custom_metadata for one object. + // 8 KiB. 
+ ServiceConstants_MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 8192 + // The maximum total bytes that can be populated into all bucket metadata + // fields. + // 20 KiB. + ServiceConstants_MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 20480 + // The maximum number of NotificationConfigs that can be registered + // for a given bucket. + ServiceConstants_MAX_NOTIFICATION_CONFIGS_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of LifecycleRules that can be registered for a given + // bucket. + ServiceConstants_MAX_LIFECYCLE_RULES_PER_BUCKET ServiceConstants_Values = 100 + // The maximum number of custom attributes per NotificationConfigs. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTES ServiceConstants_Values = 5 + // The maximum length of a custom attribute key included in + // NotificationConfig. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH ServiceConstants_Values = 256 + // The maximum length of a custom attribute value included in a + // NotificationConfig. + ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH ServiceConstants_Values = 1024 + // The maximum number of key/value entries per bucket label. + ServiceConstants_MAX_LABELS_ENTRIES_COUNT ServiceConstants_Values = 64 + // The maximum character length of the key or value in a bucket + // label map. + ServiceConstants_MAX_LABELS_KEY_VALUE_LENGTH ServiceConstants_Values = 63 + // The maximum byte size of the key or value in a bucket label + // map. + ServiceConstants_MAX_LABELS_KEY_VALUE_BYTES ServiceConstants_Values = 128 + // The maximum number of object IDs that can be included in a + // DeleteObjectsRequest. + ServiceConstants_MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST ServiceConstants_Values = 1000 + // The maximum number of days for which a token returned by the + // GetListObjectsSplitPoints RPC is valid. + ServiceConstants_SPLIT_TOKEN_MAX_VALID_DAYS ServiceConstants_Values = 14 +) + +// Enum value maps for ServiceConstants_Values. 
+var ( + ServiceConstants_Values_name = map[int32]string{ + 0: "VALUES_UNSPECIFIED", + 2097152: "MAX_READ_CHUNK_BYTES", + // Duplicate value: 2097152: "MAX_WRITE_CHUNK_BYTES", + 5242880: "MAX_OBJECT_SIZE_MB", + 1024: "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES", + 4096: "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES", + 8192: "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES", + 20480: "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES", + 100: "MAX_NOTIFICATION_CONFIGS_PER_BUCKET", + // Duplicate value: 100: "MAX_LIFECYCLE_RULES_PER_BUCKET", + 5: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES", + 256: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH", + // Duplicate value: 1024: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH", + 64: "MAX_LABELS_ENTRIES_COUNT", + 63: "MAX_LABELS_KEY_VALUE_LENGTH", + 128: "MAX_LABELS_KEY_VALUE_BYTES", + 1000: "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST", + 14: "SPLIT_TOKEN_MAX_VALID_DAYS", + } + ServiceConstants_Values_value = map[string]int32{ + "VALUES_UNSPECIFIED": 0, + "MAX_READ_CHUNK_BYTES": 2097152, + "MAX_WRITE_CHUNK_BYTES": 2097152, + "MAX_OBJECT_SIZE_MB": 5242880, + "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES": 1024, + "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES": 4096, + "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES": 8192, + "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES": 20480, + "MAX_NOTIFICATION_CONFIGS_PER_BUCKET": 100, + "MAX_LIFECYCLE_RULES_PER_BUCKET": 100, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES": 5, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH": 256, + "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH": 1024, + "MAX_LABELS_ENTRIES_COUNT": 64, + "MAX_LABELS_KEY_VALUE_LENGTH": 63, + "MAX_LABELS_KEY_VALUE_BYTES": 128, + "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST": 1000, + "SPLIT_TOKEN_MAX_VALID_DAYS": 14, + } +) + +func (x ServiceConstants_Values) Enum() *ServiceConstants_Values { + p := new(ServiceConstants_Values) + *p = x + return p +} + +func (x ServiceConstants_Values) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceConstants_Values) Descriptor() protoreflect.EnumDescriptor { + return file_google_storage_v2_storage_proto_enumTypes[0].Descriptor() +} + +func (ServiceConstants_Values) Type() protoreflect.EnumType { + return &file_google_storage_v2_storage_proto_enumTypes[0] +} + +func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceConstants_Values.Descriptor instead. +func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} +} + +// Request message for DeleteBucket. +type DeleteBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket to delete. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, only deletes the bucket if its metageneration matches this value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, only deletes the bucket if its metageneration does not match this + // value. 
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` +} + +func (x *DeleteBucketRequest) Reset() { + *x = DeleteBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteBucketRequest) ProtoMessage() {} + +func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteBucketRequest.ProtoReflect.Descriptor instead. +func (*DeleteBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{0} +} + +func (x *DeleteBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +// Request message for GetBucket. +type GetBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If set, and if the bucket's current metageneration does not match the + // specified value, the request will return an error. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, and if the bucket's current metageneration matches the specified + // value, the request will return an error. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Mask specifying which fields to read. + // A "*" field may be used to indicate all fields. + // If no mask is specified, will default to all fields. + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetBucketRequest) Reset() { + *x = GetBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBucketRequest) ProtoMessage() {} + +func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBucketRequest.ProtoReflect.Descriptor instead. 
+func (*GetBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{1} +} + +func (x *GetBucketRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *GetBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for CreateBucket. +type CreateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to which this bucket will belong. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Properties of the new bucket being inserted. + // The name of the bucket is specified in the `bucket_id` field. Populating + // `bucket.name` field will result in an error. + // The project of the bucket must be specified in the `bucket.project` field. + // This field must be in `projects/{projectIdentifier}` format, + // {projectIdentifier} can be the project ID or project number. The `parent` + // field must be either empty or `projects/_`. + Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The ID to use for this bucket, which will become the final + // component of the bucket's resource name. For example, the value `foo` might + // result in a bucket with the name `projects/123456/buckets/foo`. + BucketId string `protobuf:"bytes,3,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,6,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedDefaultObjectAcl string `protobuf:"bytes,7,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` +} + +func (x *CreateBucketRequest) Reset() { + *x = CreateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBucketRequest) ProtoMessage() {} + +func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead. 
+func (*CreateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateBucketRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *CreateBucketRequest) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *CreateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +// Request message for ListBuckets. +type ListBucketsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project whose buckets we are listing. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of buckets to return in a single response. The service will + // use this parameter or 1,000 items, whichever is smaller. If "acl" is + // present in the read_mask, the service will use this parameter of 200 items, + // whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // Filter results to buckets whose names begin with this prefix. + Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.owner, + // items.acl, and items.default_object_acl. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ListBucketsRequest) Reset() { + *x = ListBucketsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsRequest) ProtoMessage() {} + +func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. 
+func (*ListBucketsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{3} +} + +func (x *ListBucketsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListBucketsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListBucketsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListBucketsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// The result of a call to Buckets.ListBuckets +type ListBucketsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListBucketsResponse) Reset() { + *x = ListBucketsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBucketsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBucketsResponse) ProtoMessage() {} + +func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. +func (*ListBucketsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{4} +} + +func (x *ListBucketsResponse) GetBuckets() []*Bucket { + if x != nil { + return x.Buckets + } + return nil +} + +func (x *ListBucketsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for LockBucketRetentionPolicyRequest. +type LockBucketRetentionPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a bucket. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Makes the operation conditional on whether bucket's current + // metageneration matches the given value. Must be positive. 
+ IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` +} + +func (x *LockBucketRetentionPolicyRequest) Reset() { + *x = LockBucketRetentionPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LockBucketRetentionPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} + +func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LockBucketRetentionPolicyRequest.ProtoReflect.Descriptor instead. +func (*LockBucketRetentionPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{5} +} + +func (x *LockBucketRetentionPolicyRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *LockBucketRetentionPolicyRequest) GetIfMetagenerationMatch() int64 { + if x != nil { + return x.IfMetagenerationMatch + } + return 0 +} + +// Request for UpdateBucket method. +type UpdateBucketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to update. + // The bucket's `name` field will be used to identify the bucket. + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // If set, will only modify the bucket if its metageneration matches this + // value. + IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // If set, will only modify the bucket if its metageneration does not match + // this value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this bucket. + // Valid values are "authenticatedRead", "private", "projectPrivate", + // "publicRead", or "publicReadWrite". + PredefinedAcl string `protobuf:"bytes,8,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Apply a predefined set of default object access controls to this bucket. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateBucketRequest) Reset() { + *x = UpdateBucketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateBucketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateBucketRequest) ProtoMessage() {} + +func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateBucketRequest.ProtoReflect.Descriptor instead. +func (*UpdateBucketRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateBucketRequest) GetBucket() *Bucket { + if x != nil { + return x.Bucket + } + return nil +} + +func (x *UpdateBucketRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateBucketRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { + if x != nil { + return x.PredefinedDefaultObjectAcl + } + return "" +} + +func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request message for DeleteNotificationConfig. +type DeleteNotificationConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the NotificationConfig. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *DeleteNotificationConfigRequest) Reset() { + *x = DeleteNotificationConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteNotificationConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNotificationConfigRequest) ProtoMessage() {} + +func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteNotificationConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for GetNotificationConfig. 
+type GetNotificationConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The parent bucket of the NotificationConfig. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetNotificationConfigRequest) Reset() { + *x = GetNotificationConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNotificationConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNotificationConfigRequest) ProtoMessage() {} + +func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} +} + +func (x *GetNotificationConfigRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Request message for CreateNotificationConfig. +type CreateNotificationConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The bucket to which this NotificationConfig belongs. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. Properties of the NotificationConfig to be inserted. + NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` +} + +func (x *CreateNotificationConfigRequest) Reset() { + *x = CreateNotificationConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateNotificationConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateNotificationConfigRequest) ProtoMessage() {} + +func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateNotificationConfigRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { + if x != nil { + return x.NotificationConfig + } + return nil +} + +// Request message for ListNotifications. 
+type ListNotificationConfigsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of a Google Cloud Storage bucket. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of NotificationConfigs to return. The service may + // return fewer than this value. The default value is 100. Specifying a value + // above 100 will result in a page_size of 100. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A page token, received from a previous `ListNotificationConfigs` call. + // Provide this to retrieve the subsequent page. + // + // When paginating, all other parameters provided to `ListNotificationConfigs` + // must match the call that provided the page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListNotificationConfigsRequest) Reset() { + *x = ListNotificationConfigsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationConfigsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationConfigsRequest) ProtoMessage() {} + +func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} +} + +func (x *ListNotificationConfigsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListNotificationConfigsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListNotificationConfigsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The result of a call to ListNotificationConfigs +type ListNotificationConfigsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` + // A token, which can be sent as `page_token` to retrieve the next page. + // If this field is omitted, there are no subsequent pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListNotificationConfigsResponse) Reset() { + *x = ListNotificationConfigsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListNotificationConfigsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListNotificationConfigsResponse) ProtoMessage() {} + +func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} +} + +func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { + if x != nil { + return x.NotificationConfigs + } + return nil +} + +func (x *ListNotificationConfigsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request message for ComposeObject. +type ComposeObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Properties of the resulting object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // The list of source objects that will be concatenated into a single object. + SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Resource name of the Cloud KMS key, of the form + // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, + // that will be used to encrypt the object. Overrides the object + // metadata's `kms_key_name` value, if any. + KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. This will be validated against the + // combined checksums of the component objects. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *ComposeObjectRequest) Reset() { + *x = ComposeObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest) ProtoMessage() {} + +func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} +} + +func (x *ComposeObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject { + if x != nil { + return x.SourceObjects + } + return nil +} + +func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ComposeObjectRequest) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +// Message for deleting an object. +// `bucket` and `object` **must** be set. +type DeleteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the finalized object to delete. + // Note: If you want to delete an unfinalized resumable upload please use + // `CancelResumableWrite`. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, permanently deletes a specific revision of this object (as + // opposed to the latest version, the default). + Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. 
Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *DeleteObjectRequest) Reset() { + *x = DeleteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteObjectRequest) ProtoMessage() {} + +func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} +} + +func (x *DeleteObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *DeleteObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *DeleteObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Message for restoring an object. +// `bucket`, `object`, and `generation` **must** be set. +type RestoreObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to restore. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // Required. The specific revision of the object to restore. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // If false or unset, the bucket's default object ACL will be used. + // If true, copy the source object's access controls. + // Return an error if bucket has UBLA enabled. 
+ CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *RestoreObjectRequest) Reset() { + *x = RestoreObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectRequest) ProtoMessage() {} + +func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. +func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *RestoreObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *RestoreObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *RestoreObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetCopySourceAcl() bool { + if x != nil && x.CopySourceAcl != nil { + return *x.CopySourceAcl + } + return false +} + +func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Message for canceling an in-progress resumable upload. +// `upload_id` **must** be set. +type CancelResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The upload_id of the resumable upload to cancel. This should be + // copied from the `upload_id` field of `StartResumableWriteResponse`. 
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *CancelResumableWriteRequest) Reset() { + *x = CancelResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteRequest) ProtoMessage() {} + +func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} +} + +func (x *CancelResumableWriteRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Empty response message for canceling an in-progress resumable upload, will be +// extended as needed. +type CancelResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelResumableWriteResponse) Reset() { + *x = CancelResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteResponse) ProtoMessage() {} + +func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} +} + +// Request message for ReadObject. +type ReadObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the bucket containing the object to read. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to read. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed + // to the latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // The offset for the first byte to return in the read, relative to the start + // of the object. + // + // A negative `read_offset` value will be interpreted as the number of bytes + // back from the end of the object to be returned. 
For example, if an object's + // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and + // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting + // a negative offset with magnitude larger than the size of the object will + // return the entire object. + ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"` + // The maximum number of `data` bytes the server is allowed to return in the + // sum of all `Object` messages. A `read_limit` of zero indicates that there + // is no limit, and a negative `read_limit` will cause an error. + // + // If the stream returns fewer bytes than allowed by the `read_limit` and no + // error occurred, the stream includes all data from the `read_offset` to the + // end of the resource. + ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // The checksummed_data field and its children will always be present. + // If no mask is specified, will default to all fields except metadata.owner + // and metadata.acl. + // * may be used to mean "all fields". 
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *ReadObjectRequest) Reset() { + *x = ReadObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectRequest) ProtoMessage() {} + +func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. +func (*ReadObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} +} + +func (x *ReadObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *ReadObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *ReadObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ReadObjectRequest) GetReadOffset() int64 { + if x != nil { + return x.ReadOffset + } + return 0 +} + +func (x *ReadObjectRequest) GetReadLimit() int64 { + if x != nil { + return x.ReadLimit + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Request message for GetObject. +type GetObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. Name of the object. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // If present, selects a specific revision of this object (as opposed to the + // latest version, the default). + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // If true, return the soft-deleted version of this object. + SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. 
Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // Mask specifying which fields to read. + // If no mask is specified, will default to all fields except metadata.acl and + // metadata.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` +} + +func (x *GetObjectRequest) Reset() { + *x = GetObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetObjectRequest) ProtoMessage() {} + +func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. 
+func (*GetObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} +} + +func (x *GetObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *GetObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *GetObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *GetObjectRequest) GetSoftDeleted() bool { + if x != nil && x.SoftDeleted != nil { + return *x.SoftDeleted + } + return false +} + +func (x *GetObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +// Response message for ReadObject. +type ReadObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A portion of the data for the object. The service **may** leave `data` + // empty for any given `ReadResponse`. This enables the service to inform the + // client that the request is still live while it is running an operation to + // generate more data. + ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` + // The checksums of the complete object. If the object is downloaded in full, + // the client should compute one of these checksums over the downloaded object + // and compare it against the value provided here. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // If read_offset and or read_limit was specified on the + // ReadObjectRequest, ContentRange will be populated on the first + // ReadObjectResponse message of the read stream. + ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` + // Metadata of the object whose media is being returned. + // Only populated in the first response in the stream. 
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ReadObjectResponse) Reset() { + *x = ReadObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadObjectResponse) ProtoMessage() {} + +func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. +func (*ReadObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} +} + +func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { + if x != nil { + return x.ChecksummedData + } + return nil +} + +func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *ReadObjectResponse) GetContentRange() *ContentRange { + if x != nil { + return x.ContentRange + } + return nil +} + +func (x *ReadObjectResponse) GetMetadata() *Object { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes an attempt to insert an object, possibly over multiple requests. +type WriteObjectSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Destination object, including its name and its metadata. + Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current + // generation matches the given value. Setting to 0 makes the operation + // succeed only if there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live + // generation does not match the given value. If no live object exists, the + // precondition fails. Setting to 0 makes the operation succeed only if + // there is a live version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. 
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // The expected final object size being uploaded. + // If this value is set, closing the stream after writing fewer or more than + // `object_size` bytes will result in an OUT_OF_RANGE error. + // + // This situation is considered a client error, and if such an error occurs + // you must start the upload over from scratch, this time sending the correct + // number of bytes. + ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` +} + +func (x *WriteObjectSpec) Reset() { + *x = WriteObjectSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectSpec) ProtoMessage() {} + +func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. +func (*WriteObjectSpec) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} +} + +func (x *WriteObjectSpec) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +func (x *WriteObjectSpec) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *WriteObjectSpec) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *WriteObjectSpec) GetObjectSize() int64 { + if x != nil && x.ObjectSize != nil { + return *x.ObjectSize + } + return 0 +} + +// Request message for WriteObject. +type WriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // + // *WriteObjectRequest_UploadId + // *WriteObjectRequest_WriteObjectSpec + FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). 
+ // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An incorrect value will cause an error. + WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // + // *WriteObjectRequest_ChecksummedData + Data isWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). + ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *WriteObjectRequest) Reset() { + *x = WriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectRequest) ProtoMessage() {} + +func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} +} + +func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage + } + return nil +} + +func (x *WriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *WriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *WriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isWriteObjectRequest_FirstMessage interface { + isWriteObjectRequest_FirstMessage() +} + +type WriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type WriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {} + +func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {} + +type isWriteObjectRequest_Data interface { + isWriteObjectRequest_Data() +} + +type WriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {} + +// Response message for WriteObject. +type WriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // + // *WriteObjectResponse_PersistedSize + // *WriteObjectResponse_Resource + WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *WriteObjectResponse) Reset() { + *x = WriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteObjectResponse) ProtoMessage() {} + +func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. +func (*WriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} +} + +func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *WriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *WriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isWriteObjectResponse_WriteStatus interface { + isWriteObjectResponse_WriteStatus() +} + +type WriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type WriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} + +func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} + +// Request message for BidiWriteObject. +type BidiWriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // + // *BidiWriteObjectRequest_UploadId + // *BidiWriteObjectRequest_WriteObjectSpec + FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An invalid value will cause an error. 
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // + // *BidiWriteObjectRequest_ChecksummedData + Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). + ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // For each BidiWriteObjectRequest where state_lookup is `true` or the client + // closes the stream, the service will send a BidiWriteObjectResponse + // containing the current persisted size. The persisted size sent in responses + // covers all the bytes the server has persisted thus far and can be used to + // decide what data is safe for the client to drop. Note that the object's + // current size reported by the BidiWriteObjectResponse may lag behind the + // number of bytes written by the client. This field is ignored if + // `finish_write` is set to true. + StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` + // Persists data written on the stream, up to and including the current + // message, to permanent storage. This option should be used sparingly as it + // may reduce performance. Ongoing writes will periodically be persisted on + // the server even when `flush` is not set. This field is ignored if + // `finish_write` is set to true since there's no need to checkpoint or flush + // if this message completes the write. + Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *BidiWriteObjectRequest) Reset() { + *x = BidiWriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectRequest) ProtoMessage() {} + +func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} +} + +func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage + } + return nil +} + +func (x *BidiWriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *BidiWriteObjectRequest) GetStateLookup() bool { + if x != nil { + return x.StateLookup + } + return false +} + +func (x *BidiWriteObjectRequest) GetFlush() bool { + if x != nil { + return x.Flush + } + return false +} + +func (x *BidiWriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isBidiWriteObjectRequest_FirstMessage interface { + isBidiWriteObjectRequest_FirstMessage() +} + +type BidiWriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type BidiWriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} + +func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} + +type isBidiWriteObjectRequest_Data interface { + isBidiWriteObjectRequest_Data() +} + +type BidiWriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {} + +// Response message for BidiWriteObject. +type BidiWriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // + // *BidiWriteObjectResponse_PersistedSize + // *BidiWriteObjectResponse_Resource + WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *BidiWriteObjectResponse) Reset() { + *x = BidiWriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectResponse) ProtoMessage() {} + +func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *BidiWriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *BidiWriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isBidiWriteObjectResponse_WriteStatus interface { + isBidiWriteObjectResponse_WriteStatus() +} + +type BidiWriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type BidiWriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {} + +func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {} + +// Request message for ListObjects. +type ListObjectsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which to look for objects. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of `items` plus `prefixes` to return + // in a single page of responses. As duplicate `prefixes` are + // omitted, fewer total results may be returned than requested. The service + // will use this parameter or 1,000 items, whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, returns results in a directory-like mode. 
`items` will contain + // only objects whose names, aside from the `prefix`, do not + // contain `delimiter`. Objects whose names, aside from the + // `prefix`, contain `delimiter` will have their name, + // truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // If true, objects that end in exactly one instance of `delimiter` + // will have their metadata included in `items` in addition to + // `prefixes`. + IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` + // Filter results to objects whose names begin with this prefix. + Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` + // If `true`, lists all versions of an object as distinct results. + // For more information, see + // [Object + // Versioning](https://cloud.google.com/storage/docs/object-versioning). + Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.acl and + // items.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Filter results to objects whose names are lexicographically equal + // to or after lexicographic_start. If lexicographic_end is also set, the + // objects listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` + // Optional. Filter results to objects whose names are lexicographically + // before lexicographic_end. If lexicographic_start is also set, the objects + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // Optional. If true, only list all soft-deleted versions of the object. + // Soft delete policy is required to set this option. + SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. If true, will also include folders and managed folders (besides + // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'. + IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"` + // Optional. Filter results to objects and prefixes that match this glob + // pattern. See [List Objects Using + // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) + // for the full syntax. 
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` +} + +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. +func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} +} + +func (x *ListObjectsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *ListObjectsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListObjectsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListObjectsRequest) GetDelimiter() string { + if x != nil { + return x.Delimiter + } + return "" +} + +func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool { + if x != nil { + return x.IncludeTrailingDelimiter + } + return false +} + +func (x *ListObjectsRequest) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *ListObjectsRequest) GetVersions() bool { + if x != nil { + return x.Versions + } + return false +} + +func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.ReadMask + } + return nil +} + +func (x *ListObjectsRequest) GetLexicographicStart() string { + if x != nil { + return x.LexicographicStart + } + return "" +} + +func (x *ListObjectsRequest) GetLexicographicEnd() string { + if x != nil { + return x.LexicographicEnd + } + return "" +} + +func (x *ListObjectsRequest) GetSoftDeleted() bool { + if x != nil { + return x.SoftDeleted + } + return false +} + +func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool { + if x != nil { + return x.IncludeFoldersAsPrefixes + } + return false +} + +func (x *ListObjectsRequest) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + +// Request object for `QueryWriteStatus`. +type QueryWriteStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the resume token for the object whose write status is + // being requested. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *QueryWriteStatusRequest) Reset() { + *x = QueryWriteStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusRequest) ProtoMessage() {} + +func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. +func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} +} + +func (x *QueryWriteStatusRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Response object for `QueryWriteStatus`. +type QueryWriteStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. + // + // Types that are assignable to WriteStatus: + // + // *QueryWriteStatusResponse_PersistedSize + // *QueryWriteStatusResponse_Resource + WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *QueryWriteStatusResponse) Reset() { + *x = QueryWriteStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryWriteStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWriteStatusResponse) ProtoMessage() {} + +func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. 
+func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} +} + +func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *QueryWriteStatusResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *QueryWriteStatusResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isQueryWriteStatusResponse_WriteStatus interface { + isQueryWriteStatusResponse_WriteStatus() +} + +type QueryWriteStatusResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. This is the correct value for the + // 'write_offset' field to use when resuming the `WriteObject` operation. + // Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type QueryWriteStatusResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {} + +func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {} + +// Request message for RewriteObject. +// If the source object is encrypted using a Customer-Supplied Encryption Key +// the key information must be provided in the copy_source_encryption_algorithm, +// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes +// fields. If the destination object should be encrypted the keying information +// should be provided in the encryption_algorithm, encryption_key_bytes, and +// encryption_key_sha256_bytes fields of the +// common_object_request_params.customer_encryption field. +type RewriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Immutable. The name of the destination object. + // See the + // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` + // Required. Immutable. The name of the bucket containing the destination + // object. + DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` + // The name of the Cloud KMS key that will be used to encrypt the destination + // object. The Cloud KMS key must be located in same location as the object. + // If the parameter is not specified, the request uses the destination + // bucket's default encryption key, if any, or else the Google-managed + // encryption key. 
+ DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"` + // Properties of the destination, post-rewrite object. + // The `name`, `bucket` and `kms_key` fields must not be populated (these + // values are specified in the `destination_name`, `destination_bucket`, and + // `destination_kms_key` fields). + // If `destination` is present it will be used to construct the destination + // object's metadata; otherwise the destination object's metadata will be + // copied from the source object. + Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // Required. Name of the bucket in which to find the source object. + SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"` + // Required. Name of the source object. + SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"` + // If present, selects a specific revision of the source object (as opposed to + // the latest version, the default). + SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"` + // Include this field (from the previous rewrite response) on each rewrite + // request after the first one, until the rewrite response 'done' flag is + // true. Calls that provide a rewriteToken can omit all other request fields, + // but if included those fields must match the values provided in the first + // rewrite request. + RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // Apply a predefined set of access controls to the destination object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". + DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the destination object's current + // metageneration does not match the given value. 
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation matches the given value. + IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"` + // Makes the operation conditional on whether the source object's live + // generation does not match the given value. + IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration matches the given value. + IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"` + // Makes the operation conditional on whether the source object's current + // metageneration does not match the given value. + IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"` + // The maximum number of bytes that will be rewritten per rewrite request. + // Most callers + // shouldn't need to specify this parameter - it is primarily in place to + // support testing. If specified the value must be an integral multiple of + // 1 MiB (1048576). Also, this only applies to requests where the source and + // destination span locations and/or storage classes. Finally, this value must + // not change across rewrite calls else you'll get an error that the + // `rewriteToken` is invalid. + MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"` + // The algorithm used to encrypt the source object, if any. Used if the source + // object was encrypted with a Customer-Supplied Encryption Key. + CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"` + // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt + // the source object, if it was encrypted with a Customer-Supplied Encryption + // Key. + CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"` + // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used + // to encrypt the source object, if it was encrypted with a Customer-Supplied + // Encryption Key. + CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. 
This will be used to validate the + // destination object after rewriting. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *RewriteObjectRequest) Reset() { + *x = RewriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteObjectRequest) ProtoMessage() {} + +func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. +func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} +} + +func (x *RewriteObjectRequest) GetDestinationName() string { + if x != nil { + return x.DestinationName + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationBucket() string { + if x != nil { + return x.DestinationBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationKmsKey() string { + if x != nil { + return x.DestinationKmsKey + } + return "" +} + +func (x *RewriteObjectRequest) GetDestination() *Object { + if x != nil { + return x.Destination + } + return nil +} + +func (x *RewriteObjectRequest) GetSourceBucket() string { + if x != nil { + return x.SourceBucket + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceObject() string { + if x != nil { + return x.SourceObject + } + return "" +} + +func (x *RewriteObjectRequest) GetSourceGeneration() int64 { + if x != nil { + return x.SourceGeneration + } + return 0 +} + +func (x *RewriteObjectRequest) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string { + if x != nil { + return x.DestinationPredefinedAcl + } + return "" +} + +func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 { + if x != nil && x.IfSourceGenerationMatch != nil { + return *x.IfSourceGenerationMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 { + if x != nil && x.IfSourceGenerationNotMatch != nil { + return *x.IfSourceGenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 { + if x != nil && x.IfSourceMetagenerationMatch != nil { + return *x.IfSourceMetagenerationMatch + } + return 0 +} + +func (x 
*RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 { + if x != nil && x.IfSourceMetagenerationNotMatch != nil { + return *x.IfSourceMetagenerationNotMatch + } + return 0 +} + +func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 { + if x != nil { + return x.MaxBytesRewrittenPerCall + } + return 0 +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string { + if x != nil { + return x.CopySourceEncryptionAlgorithm + } + return "" +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeyBytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.CopySourceEncryptionKeySha256Bytes + } + return nil +} + +func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +// A rewrite response. +type RewriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The total bytes written so far, which can be used to provide a waiting user + // with a progress indicator. This property is always present in the response. + TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"` + // The total size of the object being copied in bytes. This property is always + // present in the response. + ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"` + // `true` if the copy is finished; otherwise, `false` if + // the copy is in progress. This property is always present in the response. + Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` + // A token to use in subsequent requests to continue copying data. This token + // is present in the response only when there is more data to copy. + RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` + // A resource containing the metadata for the copied-to object. This property + // is present in the response only when copying completes. + Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *RewriteResponse) Reset() { + *x = RewriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RewriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RewriteResponse) ProtoMessage() {} + +func (x *RewriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
+func (*RewriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} +} + +func (x *RewriteResponse) GetTotalBytesRewritten() int64 { + if x != nil { + return x.TotalBytesRewritten + } + return 0 +} + +func (x *RewriteResponse) GetObjectSize() int64 { + if x != nil { + return x.ObjectSize + } + return 0 +} + +func (x *RewriteResponse) GetDone() bool { + if x != nil { + return x.Done + } + return false +} + +func (x *RewriteResponse) GetRewriteToken() string { + if x != nil { + return x.RewriteToken + } + return "" +} + +func (x *RewriteResponse) GetResource() *Object { + if x != nil { + return x.Resource + } + return nil +} + +// Request message StartResumableWrite. +type StartResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The destination bucket, object, and metadata, as well as any + // preconditions. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` + // The checksums of the complete object. This will be used to validate the + // uploaded object. For each upload, object_checksums can be provided with + // either StartResumableWriteRequest or the WriteObjectRequest with + // finish_write set to `true`. + ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` +} + +func (x *StartResumableWriteRequest) Reset() { + *x = StartResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteRequest) ProtoMessage() {} + +func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} +} + +func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x != nil { + return x.WriteObjectSpec + } + return nil +} + +func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +// Response object for `StartResumableWrite`. +type StartResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The upload_id of the newly started resumable write operation. 
This + // value should be copied into the `WriteObjectRequest.upload_id` field. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *StartResumableWriteResponse) Reset() { + *x = StartResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResumableWriteResponse) ProtoMessage() {} + +func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} +} + +func (x *StartResumableWriteResponse) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Request message for UpdateObject. +type UpdateObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The object to update. + // The object's bucket and name fields are used to identify the object to + // update. If present, the object's generation field selects a specific + // revision of this object whose metadata should be updated. Otherwise, + // assumes the live version of the object. + Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // Apply a predefined set of access controls to this object. + // Valid values are "authenticatedRead", "bucketOwnerFullControl", + // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". 
+ PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` + // Required. List of fields to be updated. + // + // To specify ALL fields, equivalent to the JSON API's "update" function, + // specify a single field with the value `*`. Note: not recommended. If a new + // field is introduced at a later time, an older client updating with the `*` + // may accidentally reset the new field's value. + // + // Not specifying any fields is an error. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *UpdateObjectRequest) Reset() { + *x = UpdateObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateObjectRequest) ProtoMessage() {} + +func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. +func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} +} + +func (x *UpdateObjectRequest) GetObject() *Object { + if x != nil { + return x.Object + } + return nil +} + +func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *UpdateObjectRequest) GetPredefinedAcl() string { + if x != nil { + return x.PredefinedAcl + } + return "" +} + +func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +// Request message for GetServiceAccount. +type GetServiceAccountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Project ID, in the format of "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. 
+ Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetServiceAccountRequest) Reset() { + *x = GetServiceAccountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetServiceAccountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetServiceAccountRequest) ProtoMessage() {} + +func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. +func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} +} + +func (x *GetServiceAccountRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request message for CreateHmacKey. +type CreateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project that the HMAC-owning service account lives in, in the + // format of "projects/{projectIdentifier}". {projectIdentifier} can be the + // project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Required. The service account to create the HMAC for. + ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` +} + +func (x *CreateHmacKeyRequest) Reset() { + *x = CreateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyRequest) ProtoMessage() {} + +func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} +} + +func (x *CreateHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +// Create hmac response. The only time the secret for an HMAC will be returned. +type CreateHmacKeyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key metadata. + Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // HMAC key secret material. + // In raw bytes format (not base64-encoded). 
+ SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` +} + +func (x *CreateHmacKeyResponse) Reset() { + *x = CreateHmacKeyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateHmacKeyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateHmacKeyResponse) ProtoMessage() {} + +func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. +func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} +} + +func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { + if x != nil { + return x.SecretKeyBytes + } + return nil +} + +// Request object to delete a given HMAC key. +type DeleteHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. The project that owns the HMAC key, in the format of + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *DeleteHmacKeyRequest) Reset() { + *x = DeleteHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteHmacKeyRequest) ProtoMessage() {} + +func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} +} + +func (x *DeleteHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *DeleteHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request object to get metadata on a given HMAC key. +type GetHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The identifying key for the HMAC to delete. + AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Required. 
The project the HMAC key lies in, in the format of + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` +} + +func (x *GetHmacKeyRequest) Reset() { + *x = GetHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHmacKeyRequest) ProtoMessage() {} + +func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} +} + +func (x *GetHmacKeyRequest) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *GetHmacKeyRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +// Request to fetch a list of HMAC keys under a given project. +type ListHmacKeysRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The project to list HMAC keys for, in the format of + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // The maximum number of keys to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously returned token from ListHmacKeysResponse to get the next page. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, filters to only return HMAC keys for specified service account. + ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // If set, return deleted keys that have not yet been wiped out. + ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` +} + +func (x *ListHmacKeysRequest) Reset() { + *x = ListHmacKeysRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysRequest) ProtoMessage() {} + +func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
+func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} +} + +func (x *ListHmacKeysRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListHmacKeysRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListHmacKeysRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { + if x != nil { + return x.ShowDeletedKeys + } + return false +} + +// Hmac key list response with next page information. +type ListHmacKeysResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListHmacKeysResponse) Reset() { + *x = ListHmacKeysResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListHmacKeysResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListHmacKeysResponse) ProtoMessage() {} + +func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. +func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} +} + +func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { + if x != nil { + return x.HmacKeys + } + return nil +} + +func (x *ListHmacKeysResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request object to update an HMAC key state. +// HmacKeyMetadata.state is required and the only writable field in +// UpdateHmacKey operation. Specifying fields other than state will result in an +// error. +type UpdateHmacKeyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The HMAC key to update. + // If present, the hmac_key's `id` field will be used to identify the key. + // Otherwise, the hmac_key's access_id and project fields will be used to + // identify the key. + HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` + // Update mask for hmac_key. + // Not specifying any fields will mean only the `state` field is updated to + // the value specified in `hmac_key`. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateHmacKeyRequest) Reset() { + *x = UpdateHmacKeyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateHmacKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateHmacKeyRequest) ProtoMessage() {} + +func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} +} + +func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { + if x != nil { + return x.HmacKey + } + return nil +} + +func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Parameters that can be passed to any object request. +type CommonObjectRequestParams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Encryption algorithm used with the Customer-Supplied Encryption Keys + // feature. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // Encryption key used with the Customer-Supplied Encryption Keys feature. + // In raw bytes format (not base64-encoded). + EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` + // SHA256 hash of encryption key used with the Customer-Supplied Encryption + // Keys feature. + EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` +} + +func (x *CommonObjectRequestParams) Reset() { + *x = CommonObjectRequestParams{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonObjectRequestParams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonObjectRequestParams) ProtoMessage() {} + +func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. 
+func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} +} + +func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { + if x != nil { + return x.EncryptionKeyBytes + } + return nil +} + +func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { + if x != nil { + return x.EncryptionKeySha256Bytes + } + return nil +} + +// Shared constants. +type ServiceConstants struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ServiceConstants) Reset() { + *x = ServiceConstants{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceConstants) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceConstants) ProtoMessage() {} + +func (x *ServiceConstants) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. +func (*ServiceConstants) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} +} + +// A bucket. +type Bucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Output only. The user-chosen part of the bucket name. The `{bucket}` + // portion of the `name` field. For globally unique buckets, this is equal to + // the "bucket name" of other Cloud Storage APIs. Example: "pub". + BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` + // The etag of the bucket. + // If included in the metadata of an UpdateBucketRequest, the operation will + // only be performed if the etag matches that of the bucket. + Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The project which owns this bucket, in the format of + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. The metadata generation of this bucket. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Immutable. The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to `US`. + // See the + // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's + // guide] for the authoritative list. Attempting to update this field after + // the bucket is created will result in an error. + Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` + // Output only. The location type of the bucket (region, dual-region, + // multi-region, etc). 
+ LocationType string `protobuf:"bytes,6,opt,name=location_type,json=locationType,proto3" json:"location_type,omitempty"` + // The bucket's default storage class, used whenever no storageClass is + // specified for a newly-created object. This defines how objects in the + // bucket are stored and determines the SLA and the cost of storage. + // If this value is not specified when the bucket is created, it will default + // to `STANDARD`. For more information, see + // https://developers.google.com/storage/docs/storage-classes. + StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // The recovery point objective for cross-region replication of the bucket. + // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default + // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region + // buckets only. If rpo is not specified when the bucket is created, it + // defaults to "DEFAULT". For more information, see + // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. + Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` + // Access controls on the bucket. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + Acl []*BucketAccessControl `protobuf:"bytes,8,rep,name=acl,proto3" json:"acl,omitempty"` + // Default access controls to apply to new objects when no ACL is provided. + // If iam_config.uniform_bucket_level_access is enabled on this bucket, + // requests to set, read, or modify acl is an error. + DefaultObjectAcl []*ObjectAccessControl `protobuf:"bytes,9,rep,name=default_object_acl,json=defaultObjectAcl,proto3" json:"default_object_acl,omitempty"` + // The bucket's lifecycle config. See + // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] + // for more information. + Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` + // Output only. The creation time of the bucket. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] + // (CORS) config. + Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` + // Output only. The modification time of the bucket. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The default value for event-based hold on newly created objects in this + // bucket. Event-based hold is a way to retain objects indefinitely until an + // event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is for + // banks to hold loan documents for at least 3 years after loan is paid in + // full. Here, bucket-level retention is 3 years and the event is loan being + // paid in full. In this example, these objects will be held intact for any + // number of years until the event has occurred (event-based hold on the + // object is released) and then 3 more years after that. That means retention + // duration of the objects begins from the moment event-based hold + // transitioned from true to false. 
Objects under event-based hold cannot be + // deleted, overwritten or archived until the hold is removed. + DefaultEventBasedHold bool `protobuf:"varint,14,opt,name=default_event_based_hold,json=defaultEventBasedHold,proto3" json:"default_event_based_hold,omitempty"` + // User-provided labels, in key/value pairs. + Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The bucket's website config, controlling how the service behaves + // when accessing bucket contents as a web site. See the + // [https://cloud.google.com/storage/docs/static-website][Static Website + // Examples] for more information. + Website *Bucket_Website `protobuf:"bytes,16,opt,name=website,proto3" json:"website,omitempty"` + // The bucket's versioning config. + Versioning *Bucket_Versioning `protobuf:"bytes,17,opt,name=versioning,proto3" json:"versioning,omitempty"` + // The bucket's logging config, which defines the destination bucket + // and name prefix (if any) for the current bucket's logs. + Logging *Bucket_Logging `protobuf:"bytes,18,opt,name=logging,proto3" json:"logging,omitempty"` + // Output only. The owner of the bucket. This is always the project team's + // owner group. + Owner *Owner `protobuf:"bytes,19,opt,name=owner,proto3" json:"owner,omitempty"` + // Encryption config for a bucket. + Encryption *Bucket_Encryption `protobuf:"bytes,20,opt,name=encryption,proto3" json:"encryption,omitempty"` + // The bucket's billing config. + Billing *Bucket_Billing `protobuf:"bytes,21,opt,name=billing,proto3" json:"billing,omitempty"` + // The bucket's retention policy. The retention policy enforces a minimum + // retention time for all objects contained in the bucket, based on their + // creation time. Any attempt to overwrite or delete objects younger than the + // retention period will result in a PERMISSION_DENIED error. An unlocked + // retention policy can be modified or removed from the bucket via a + // storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. + // Attempting to remove or decrease period of a locked retention policy will + // result in a PERMISSION_DENIED error. + RetentionPolicy *Bucket_RetentionPolicy `protobuf:"bytes,22,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` + // The bucket's IAM config. + IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` + // Reserved for future use. + SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` + // Configuration that, if present, specifies the data placement for a + // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. + CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` + // The bucket's Autoclass configuration. If there is no configuration, the + // Autoclass feature will be disabled and have no effect on the bucket. + Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's hierarchical namespace configuration. If there is no + // configuration, the hierarchical namespace feature will be disabled and have + // no effect on the bucket. 
+ HierarchicalNamespace *Bucket_HierarchicalNamespace `protobuf:"bytes,32,opt,name=hierarchical_namespace,json=hierarchicalNamespace,proto3" json:"hierarchical_namespace,omitempty"` + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. + SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` +} + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. +func (*Bucket) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} +} + +func (x *Bucket) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Bucket) GetBucketId() string { + if x != nil { + return x.BucketId + } + return "" +} + +func (x *Bucket) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Bucket) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Bucket) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Bucket) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *Bucket) GetLocationType() string { + if x != nil { + return x.LocationType + } + return "" +} + +func (x *Bucket) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Bucket) GetRpo() string { + if x != nil { + return x.Rpo + } + return "" +} + +func (x *Bucket) GetAcl() []*BucketAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Bucket) GetDefaultObjectAcl() []*ObjectAccessControl { + if x != nil { + return x.DefaultObjectAcl + } + return nil +} + +func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { + if x != nil { + return x.Lifecycle + } + return nil +} + +func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Bucket) GetCors() []*Bucket_Cors { + if x != nil { + return x.Cors + } + return nil +} + +func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Bucket) GetDefaultEventBasedHold() bool { + if x != nil { + return x.DefaultEventBasedHold + } + return false +} + +func (x *Bucket) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Bucket) GetWebsite() *Bucket_Website { + if x != nil { + return x.Website + } + return nil +} + +func (x *Bucket) GetVersioning() *Bucket_Versioning { + if x != nil { + return x.Versioning + } + return nil +} + +func (x *Bucket) GetLogging() *Bucket_Logging { + if x != nil { + return x.Logging + } + return nil +} + +func (x *Bucket) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + 
+func (x *Bucket) GetEncryption() *Bucket_Encryption { + if x != nil { + return x.Encryption + } + return nil +} + +func (x *Bucket) GetBilling() *Bucket_Billing { + if x != nil { + return x.Billing + } + return nil +} + +func (x *Bucket) GetRetentionPolicy() *Bucket_RetentionPolicy { + if x != nil { + return x.RetentionPolicy + } + return nil +} + +func (x *Bucket) GetIamConfig() *Bucket_IamConfig { + if x != nil { + return x.IamConfig + } + return nil +} + +func (x *Bucket) GetSatisfiesPzs() bool { + if x != nil { + return x.SatisfiesPzs + } + return false +} + +func (x *Bucket) GetCustomPlacementConfig() *Bucket_CustomPlacementConfig { + if x != nil { + return x.CustomPlacementConfig + } + return nil +} + +func (x *Bucket) GetAutoclass() *Bucket_Autoclass { + if x != nil { + return x.Autoclass + } + return nil +} + +func (x *Bucket) GetHierarchicalNamespace() *Bucket_HierarchicalNamespace { + if x != nil { + return x.HierarchicalNamespace + } + return nil +} + +func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { + if x != nil { + return x.SoftDeletePolicy + } + return nil +} + +// An access-control entry. +type BucketAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com` + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com` + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the BucketAccessControl. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the + // bucket's BucketAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any.
+ ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *BucketAccessControl) Reset() { + *x = BucketAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketAccessControl) ProtoMessage() {} + +func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. +func (*BucketAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} +} + +func (x *BucketAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *BucketAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BucketAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *BucketAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *BucketAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *BucketAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *BucketAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *BucketAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *BucketAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// Message used to convey content being read or written, along with an optional +// checksum. +type ChecksummedData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The data. + Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // If set, the CRC32C digest of the content field. + Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` +} + +func (x *ChecksummedData) Reset() { + *x = ChecksummedData{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChecksummedData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChecksummedData) ProtoMessage() {} + +func (x *ChecksummedData) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. 
+func (*ChecksummedData) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} +} + +func (x *ChecksummedData) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + +func (x *ChecksummedData) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +// Message used for storing full (not subrange) object checksums. +type ObjectChecksums struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CRC32C digest of the object data. Computed by the Cloud Storage service for + // all written objects. + // If set in a WriteObjectRequest, service will validate that the stored + // object matches this checksum. + Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` + // 128 bit MD5 hash of the object data. + // For more information about using the MD5 hash, see + // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and + // ETags: Best Practices]. + // Not all objects will provide an MD5 hash. For example, composite objects + // provide only crc32c hashes. + // This value is equivalent to running `cat object.txt | openssl md5 -binary` + Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` +} + +func (x *ObjectChecksums) Reset() { + *x = ObjectChecksums{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectChecksums) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectChecksums) ProtoMessage() {} + +func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. +func (*ObjectChecksums) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} +} + +func (x *ObjectChecksums) GetCrc32C() uint32 { + if x != nil && x.Crc32C != nil { + return *x.Crc32C + } + return 0 +} + +func (x *ObjectChecksums) GetMd5Hash() []byte { + if x != nil { + return x.Md5Hash + } + return nil +} + +// Hmac Key Metadata, which includes all information other than the secret. +type HmacKeyMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. Resource name ID of the key in the format + // {projectIdentifier}/{accessId}. + // {projectIdentifier} can be the project ID or project number. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Immutable. Globally unique id for keys. + AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` + // Immutable. Identifies the project that owns the service account of the + // specified HMAC key, in the format "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. + Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` + // Output only. Email of the service account the key authenticates as. 
+ ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // State of the key. One of ACTIVE, INACTIVE, or DELETED. + // Writable, can be updated by UpdateHmacKey operation. + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` + // Output only. The creation time of the HMAC key. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The last modification time of the HMAC key metadata. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // The etag of the HMAC key. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *HmacKeyMetadata) Reset() { + *x = HmacKeyMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HmacKeyMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HmacKeyMetadata) ProtoMessage() {} + +func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. +func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} +} + +func (x *HmacKeyMetadata) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *HmacKeyMetadata) GetAccessId() string { + if x != nil { + return x.AccessId + } + return "" +} + +func (x *HmacKeyMetadata) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *HmacKeyMetadata) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *HmacKeyMetadata) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *HmacKeyMetadata) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +// A directive to publish Pub/Sub notifications upon changes to a bucket. +type NotificationConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The resource name of this NotificationConfig. + // Format: + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` + // The `{project}` portion may be `_` for globally unique buckets. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The Pub/Sub topic to which this subscription publishes. Formatted + // as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // The etag of the NotificationConfig. 
+ // If included in the metadata of GetNotificationConfigRequest, the operation + // will only be performed if the etag matches that of the NotificationConfig. + Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` + // If present, only send notifications about listed event types. If + // empty, send notifications for all event types. + EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` + // A list of additional attributes to attach to each Pub/Sub + // message published for this NotificationConfig. + CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If present, only apply this NotificationConfig to object names that + // begin with this prefix. + ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` + // Required. The desired content of the Payload. + PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` +} + +func (x *NotificationConfig) Reset() { + *x = NotificationConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotificationConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotificationConfig) ProtoMessage() {} + +func (x *NotificationConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. +func (*NotificationConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} +} + +func (x *NotificationConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NotificationConfig) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *NotificationConfig) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *NotificationConfig) GetEventTypes() []string { + if x != nil { + return x.EventTypes + } + return nil +} + +func (x *NotificationConfig) GetCustomAttributes() map[string]string { + if x != nil { + return x.CustomAttributes + } + return nil +} + +func (x *NotificationConfig) GetObjectNamePrefix() string { + if x != nil { + return x.ObjectNamePrefix + } + return "" +} + +func (x *NotificationConfig) GetPayloadFormat() string { + if x != nil { + return x.PayloadFormat + } + return "" +} + +// Describes the Customer-Supplied Encryption Key mechanism used to store an +// Object's data at rest. +type CustomerEncryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encryption algorithm. + EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` + // SHA256 hash value of the encryption key. + // In raw bytes format (not base64-encoded).
+ KeySha256Bytes []byte `protobuf:"bytes,3,opt,name=key_sha256_bytes,json=keySha256Bytes,proto3" json:"key_sha256_bytes,omitempty"` +} + +func (x *CustomerEncryption) Reset() { + *x = CustomerEncryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomerEncryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomerEncryption) ProtoMessage() {} + +func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. +func (*CustomerEncryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} +} + +func (x *CustomerEncryption) GetEncryptionAlgorithm() string { + if x != nil { + return x.EncryptionAlgorithm + } + return "" +} + +func (x *CustomerEncryption) GetKeySha256Bytes() []byte { + if x != nil { + return x.KeySha256Bytes + } + return nil +} + +// An object. +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Immutable. The name of this object. Nearly any sequence of unicode + // characters is valid. See + // [Guidelines](https://cloud.google.com/storage/docs/objects#naming). + // Example: `test.txt` + // The `name` field by itself does not uniquely identify a Cloud Storage + // object. A Cloud Storage object is uniquely identified by the tuple of + // (bucket, object, generation). + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Immutable. The name of the bucket containing this object. + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // The etag of the object. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object. + Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` + // Immutable. The content generation of this object. Used for object + // versioning. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Output only. The version of the metadata for this generation of this + // object. Used for preconditions and for detecting changes in metadata. A + // metageneration number is only meaningful in the context of a particular + // generation of a particular object. + Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` + // Storage class of the object. + StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + // Output only. Content-Length of the object data in bytes, matching + // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. 
+ Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + // Content-Encoding of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] + ContentEncoding string `protobuf:"bytes,7,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` + // Content-Disposition of the object data, matching + // [https://tools.ietf.org/html/rfc6266][RFC 6266]. + ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` + // Cache-Control directive for the object data, matching + // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. + // If omitted, and the object is accessible to all anonymous users, the + // default will be `public, max-age=3600`. + CacheControl string `protobuf:"bytes,9,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` + // Access controls on the object. + // If iam_config.uniform_bucket_level_access is enabled on the parent + // bucket, requests to set, read, or modify acl is an error. + Acl []*ObjectAccessControl `protobuf:"bytes,10,rep,name=acl,proto3" json:"acl,omitempty"` + // Content-Language of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. + ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` + // Output only. If this object is noncurrent, this is the time when the object + // became noncurrent. + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + // Content-Type of the object data, matching + // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. + // If an object is stored without a Content-Type, it is served as + // `application/octet-stream`. + ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Output only. The creation time of the object. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. Number of underlying components that make up this object. + // Components are accumulated by compose operations. + ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` + // Output only. Hashes for the data part of this object. This field is used + // for output only and will be silently ignored if provided in requests. + Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` + // Output only. The modification time of the object metadata. + // Set initially to object creation time and then updated whenever any + // metadata of the object changes. This includes changes made by a requester, + // such as modifying custom metadata, as well as changes made by Cloud Storage + // on behalf of a requester, such as changing the storage class based on an + // Object Lifecycle Configuration. + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // Cloud KMS Key used to encrypt this object, if the object is encrypted by + // such a key. + KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` + // Output only. 
The time at which the object's storage class was last changed. + // When the object is initially created, it will be set to time_created. + UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + // Whether an object is under temporary hold. While this flag is set to true, + // the object is protected against deletion and overwrites. A common use case + // of this flag is regulatory investigations where objects need to be retained + // while the investigation is ongoing. Note that unlike event-based hold, + // temporary hold does not impact retention expiration time of an object. + TemporaryHold bool `protobuf:"varint,20,opt,name=temporary_hold,json=temporaryHold,proto3" json:"temporary_hold,omitempty"` + // A server-determined value that specifies the earliest time that the + // object's retention period expires. + // Note 1: This field is not provided for objects with an active event-based + // hold, since retention expiration is unknown until the hold is removed. + // Note 2: This value can be provided even when temporary hold is set (so that + // the user can reason about policy without having to first unset the + // temporary hold). + RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + // User-provided metadata, in key/value pairs. + Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Whether an object is under event-based hold. + // An event-based hold is a way to force the retention of an object until + // after some event occurs. Once the hold is released by explicitly setting + // this field to false, the object will become subject to any bucket-level + // retention policy, except that the retention duration will be calculated + // from the time the event based hold was lifted, rather than the time the + // object was created. + // + // In a WriteObject request, not setting this field implies that the value + // should be taken from the parent bucket's "default_event_based_hold" field. + // In a response, this field will always be set to true or false. + EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` + // Output only. The owner of the object. This will always be the uploader of + // the object. + Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` + // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by + // such a key. + CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` + // A user-specified timestamp set on an object. + CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + // Output only. This is the time when the object became soft-deleted. + // + // Soft-deleted objects are only accessible if a soft_delete_policy is + // enabled. Also see hard_delete_time. + SoftDeleteTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=soft_delete_time,json=softDeleteTime,proto3,oneof" json:"soft_delete_time,omitempty"` + // Output only. The time when the object will be permanently deleted. 
+ // + // Only set when an object becomes soft-deleted with a soft_delete_policy. + // Otherwise, the object will not be accessible. + HardDeleteTime *timestamppb.Timestamp `protobuf:"bytes,29,opt,name=hard_delete_time,json=hardDeleteTime,proto3,oneof" json:"hard_delete_time,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. +func (*Object) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} +} + +func (x *Object) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Object) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *Object) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *Object) GetMetageneration() int64 { + if x != nil { + return x.Metageneration + } + return 0 +} + +func (x *Object) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *Object) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Object) GetContentEncoding() string { + if x != nil { + return x.ContentEncoding + } + return "" +} + +func (x *Object) GetContentDisposition() string { + if x != nil { + return x.ContentDisposition + } + return "" +} + +func (x *Object) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +func (x *Object) GetAcl() []*ObjectAccessControl { + if x != nil { + return x.Acl + } + return nil +} + +func (x *Object) GetContentLanguage() string { + if x != nil { + return x.ContentLanguage + } + return "" +} + +func (x *Object) GetDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.DeleteTime + } + return nil +} + +func (x *Object) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Object) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *Object) GetComponentCount() int32 { + if x != nil { + return x.ComponentCount + } + return 0 +} + +func (x *Object) GetChecksums() *ObjectChecksums { + if x != nil { + return x.Checksums + } + return nil +} + +func (x *Object) GetUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateTime + } + return nil +} + +func (x *Object) GetKmsKey() string { + if x != nil { + return x.KmsKey + } + return "" +} + +func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp { + if x != nil { + return x.UpdateStorageClassTime + } + return nil +} + +func (x *Object) GetTemporaryHold() bool { + if x != nil { + return x.TemporaryHold + } + return false +} + +func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.RetentionExpireTime + } + return nil +} + +func (x *Object) 
GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Object) GetEventBasedHold() bool { + if x != nil && x.EventBasedHold != nil { + return *x.EventBasedHold + } + return false +} + +func (x *Object) GetOwner() *Owner { + if x != nil { + return x.Owner + } + return nil +} + +func (x *Object) GetCustomerEncryption() *CustomerEncryption { + if x != nil { + return x.CustomerEncryption + } + return nil +} + +func (x *Object) GetCustomTime() *timestamppb.Timestamp { + if x != nil { + return x.CustomTime + } + return nil +} + +func (x *Object) GetSoftDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.SoftDeleteTime + } + return nil +} + +func (x *Object) GetHardDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.HardDeleteTime + } + return nil +} + +// An access-control entry. +type ObjectAccessControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The access permission for the entity. + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + // The ID of the access-control entry. + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // The entity holding the permission, in one of the following forms: + // * `user-{userid}` + // * `user-{email}` + // * `group-{groupid}` + // * `group-{email}` + // * `domain-{domain}` + // * `project-{team}-{projectnumber}` + // * `project-{team}-{projectid}` + // * `allUsers` + // * `allAuthenticatedUsers` + // Examples: + // * The user `liz@example.com` would be `user-liz@example.com`. + // * The group `example@googlegroups.com` would be + // `group-example@googlegroups.com`. + // * All members of the Google Apps for Business domain `example.com` would be + // `domain-example.com`. + // For project entities, `project-{team}-{projectnumber}` format will be + // returned on response. + Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` + // Output only. The alternative entity format, if exists. For project + // entities, `project-{team}-{projectid}` format will be returned on response. + EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` + // The ID for the entity, if any. + EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // The etag of the ObjectAccessControl. + // If included in the metadata of an update or delete request message, the + // operation will only be performed if the etag matches that of the live + // object's ObjectAccessControl. + Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` + // The email address associated with the entity, if any. + Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` + // The domain associated with the entity, if any. + Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` + // The project team associated with the entity, if any. 
+ ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` +} + +func (x *ObjectAccessControl) Reset() { + *x = ObjectAccessControl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectAccessControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectAccessControl) ProtoMessage() {} + +func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. +func (*ObjectAccessControl) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} +} + +func (x *ObjectAccessControl) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *ObjectAccessControl) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ObjectAccessControl) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *ObjectAccessControl) GetEntityAlt() string { + if x != nil { + return x.EntityAlt + } + return "" +} + +func (x *ObjectAccessControl) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +func (x *ObjectAccessControl) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +func (x *ObjectAccessControl) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *ObjectAccessControl) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ObjectAccessControl) GetProjectTeam() *ProjectTeam { + if x != nil { + return x.ProjectTeam + } + return nil +} + +// The result of a call to Objects.ListObjects +type ListObjectsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of items. + Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` + // The list of prefixes of objects matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string `protobuf:"bytes,2,rep,name=prefixes,proto3" json:"prefixes,omitempty"` + // The continuation token, used to page through large result sets. Provide + // this value in a subsequent request to return the next page of results. 
+ NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListObjectsResponse) Reset() { + *x = ListObjectsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsResponse) ProtoMessage() {} + +func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. +func (*ListObjectsResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} +} + +func (x *ListObjectsResponse) GetObjects() []*Object { + if x != nil { + return x.Objects + } + return nil +} + +func (x *ListObjectsResponse) GetPrefixes() []string { + if x != nil { + return x.Prefixes + } + return nil +} + +func (x *ListObjectsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Represents the Viewers, Editors, or Owners of a given project. +type ProjectTeam struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The project number. + ProjectNumber string `protobuf:"bytes,1,opt,name=project_number,json=projectNumber,proto3" json:"project_number,omitempty"` + // The team. + Team string `protobuf:"bytes,2,opt,name=team,proto3" json:"team,omitempty"` +} + +func (x *ProjectTeam) Reset() { + *x = ProjectTeam{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProjectTeam) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProjectTeam) ProtoMessage() {} + +func (x *ProjectTeam) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. +func (*ProjectTeam) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} +} + +func (x *ProjectTeam) GetProjectNumber() string { + if x != nil { + return x.ProjectNumber + } + return "" +} + +func (x *ProjectTeam) GetTeam() string { + if x != nil { + return x.Team + } + return "" +} + +// A service account, owned by Cloud Storage, which may be used when taking +// action on behalf of a given project, for example to publish Pub/Sub +// notifications or to retrieve security keys. +type ServiceAccount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ID of the notification. 
+ EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` +} + +func (x *ServiceAccount) Reset() { + *x = ServiceAccount{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceAccount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceAccount) ProtoMessage() {} + +func (x *ServiceAccount) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} +} + +func (x *ServiceAccount) GetEmailAddress() string { + if x != nil { + return x.EmailAddress + } + return "" +} + +// The owner of a specific resource. +type Owner struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entity, in the form `user-`*userId*. + Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + // The ID for the entity. + EntityId string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` +} + +func (x *Owner) Reset() { + *x = Owner{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Owner) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Owner) ProtoMessage() {} + +func (x *Owner) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Owner.ProtoReflect.Descriptor instead. +func (*Owner) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} +} + +func (x *Owner) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *Owner) GetEntityId() string { + if x != nil { + return x.EntityId + } + return "" +} + +// Specifies a requested range of bytes to download. +type ContentRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The starting offset of the object data. This value is inclusive. + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // The ending offset of the object data. This value is exclusive. + End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` + // The complete length of the object data. 
+ CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` +} + +func (x *ContentRange) Reset() { + *x = ContentRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentRange) ProtoMessage() {} + +func (x *ContentRange) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. +func (*ContentRange) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} +} + +func (x *ContentRange) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ContentRange) GetEnd() int64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *ContentRange) GetCompleteLength() int64 { + if x != nil { + return x.CompleteLength + } + return 0 +} + +// Description of a source object for a composition request. +type ComposeObjectRequest_SourceObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The source object's name. All source objects must reside in the + // same bucket. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The generation of this object to use as the source. + Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"` + // Conditions that must be met for this operation to execute. + ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject) Reset() { + *x = ComposeObjectRequest_SourceObject{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. 
+func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *ComposeObjectRequest_SourceObject) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions { + if x != nil { + return x.ObjectPreconditions + } + return nil +} + +// Preconditions for a source object of a composition request. +type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Only perform the composition if the generation of the source object + // that would be used matches this value. If this value and a generation + // are both specified, they must be the same value or the call will fail. + IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { + *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. +func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} +} + +func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +// Billing properties of a bucket. +type Bucket_Billing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // When set to true, Requester Pays is enabled for this bucket. 
+ RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"` +} + +func (x *Bucket_Billing) Reset() { + *x = Bucket_Billing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Billing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Billing) ProtoMessage() {} + +func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. +func (*Bucket_Billing) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} +} + +func (x *Bucket_Billing) GetRequesterPays() bool { + if x != nil { + return x.RequesterPays + } + return false +} + +// Cross-Origin Resource Sharing (CORS) properties for a bucket. +// For more on Cloud Storage and CORS, see +// https://cloud.google.com/storage/docs/cross-origin. +// For more on CORS in general, see https://tools.ietf.org/html/rfc6454. +type Bucket_Cors struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of Origins eligible to receive CORS response headers. See + // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. + // Note: "*" is permitted in the list of origins, and means "any Origin". + Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"` + // The list of HTTP methods on which to include CORS response headers + // (`GET`, `OPTIONS`, `POST`, etc.). Note: "*" is permitted in the list of + // methods, and means "any method". + Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"` + // The list of HTTP headers other than the + // [https://www.w3.org/TR/cors/#simple-response-header][simple response + // headers] to give permission for the user-agent to share across domains. + ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"` + // The value, in seconds, to return in the + // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age + // header] used in preflight responses. + MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` +} + +func (x *Bucket_Cors) Reset() { + *x = Bucket_Cors{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Cors) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Cors) ProtoMessage() {} + +func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
+func (*Bucket_Cors) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} +} + +func (x *Bucket_Cors) GetOrigin() []string { + if x != nil { + return x.Origin + } + return nil +} + +func (x *Bucket_Cors) GetMethod() []string { + if x != nil { + return x.Method + } + return nil +} + +func (x *Bucket_Cors) GetResponseHeader() []string { + if x != nil { + return x.ResponseHeader + } + return nil +} + +func (x *Bucket_Cors) GetMaxAgeSeconds() int32 { + if x != nil { + return x.MaxAgeSeconds + } + return 0 +} + +// Encryption properties of a bucket. +type Bucket_Encryption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"` +} + +func (x *Bucket_Encryption) Reset() { + *x = Bucket_Encryption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Encryption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Encryption) ProtoMessage() {} + +func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. +func (*Bucket_Encryption) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} +} + +func (x *Bucket_Encryption) GetDefaultKmsKey() string { + if x != nil { + return x.DefaultKmsKey + } + return "" +} + +// Bucket restriction options. +type Bucket_IamConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bucket restriction options currently enforced on the bucket. + UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"` + // Whether IAM will enforce public access prevention. Valid values are + // "enforced" or "inherited". 
+ PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"` +} + +func (x *Bucket_IamConfig) Reset() { + *x = Bucket_IamConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig) ProtoMessage() {} + +func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} +} + +func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { + if x != nil { + return x.UniformBucketLevelAccess + } + return nil +} + +func (x *Bucket_IamConfig) GetPublicAccessPrevention() string { + if x != nil { + return x.PublicAccessPrevention + } + return "" +} + +// Lifecycle properties of a bucket. +// For more information, see https://cloud.google.com/storage/docs/lifecycle. +type Bucket_Lifecycle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A lifecycle management rule, which is made of an action to take and the + // condition(s) under which the action will be taken. + Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"` +} + +func (x *Bucket_Lifecycle) Reset() { + *x = Bucket_Lifecycle{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle) ProtoMessage() {} + +func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} +} + +func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { + if x != nil { + return x.Rule + } + return nil +} + +// Logging-related properties of a bucket. +type Bucket_Logging struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The destination bucket where the current bucket's logs should be placed, + // using path format (like `projects/123456/buckets/foo`). + LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"` + // A prefix for log object names. 
+ LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"` +} + +func (x *Bucket_Logging) Reset() { + *x = Bucket_Logging{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Logging) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Logging) ProtoMessage() {} + +func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. +func (*Bucket_Logging) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} +} + +func (x *Bucket_Logging) GetLogBucket() string { + if x != nil { + return x.LogBucket + } + return "" +} + +func (x *Bucket_Logging) GetLogObjectPrefix() string { + if x != nil { + return x.LogObjectPrefix + } + return "" +} + +// Retention policy properties of a bucket. +type Bucket_RetentionPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Server-determined value that indicates the time from which policy was + // enforced and effective. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + // Once locked, an object retention policy cannot be modified. + IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` + // The duration that objects need to be retained. Retention duration must be + // greater than zero and less than 100 years. Note that enforcement of + // retention periods less than a day is not guaranteed. Such periods should + // only be used for testing purposes. Any `nanos` value specified will be + // rounded down to the nearest second. + RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"` +} + +func (x *Bucket_RetentionPolicy) Reset() { + *x = Bucket_RetentionPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_RetentionPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_RetentionPolicy) ProtoMessage() {} + +func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. 
+func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} +} + +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + +func (x *Bucket_RetentionPolicy) GetIsLocked() bool { + if x != nil { + return x.IsLocked + } + return false +} + +func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration + } + return nil +} + +// Soft delete policy properties of a bucket. +type Bucket_SoftDeletePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. + RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"` + // Time from which the policy was effective. This is service-provided. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"` +} + +func (x *Bucket_SoftDeletePolicy) Reset() { + *x = Bucket_SoftDeletePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_SoftDeletePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_SoftDeletePolicy) ProtoMessage() {} + +func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. +func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} +} + +func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration + } + return nil +} + +func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + +// Properties of a bucket related to versioning. +// For more on Cloud Storage versioning, see +// https://cloud.google.com/storage/docs/object-versioning. +type Bucket_Versioning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // While set to true, versioning is fully enabled for this bucket. 
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_Versioning) Reset() { + *x = Bucket_Versioning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Versioning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Versioning) ProtoMessage() {} + +func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. +func (*Bucket_Versioning) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} +} + +func (x *Bucket_Versioning) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +// Properties of a bucket related to accessing the contents as a static +// website. For more on hosting a static website via Cloud Storage, see +// https://cloud.google.com/storage/docs/hosting-static-website. +type Bucket_Website struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If the requested object path is missing, the service will ensure the path + // has a trailing '/', append this suffix, and attempt to retrieve the + // resulting object. This allows the creation of `index.html` + // objects to represent directory pages. + MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"` + // If the requested object path is missing, and any + // `mainPageSuffix` object is missing, if applicable, the service + // will return the named object from this bucket as the content for a + // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] + // result. + NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"` +} + +func (x *Bucket_Website) Reset() { + *x = Bucket_Website{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Website) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Website) ProtoMessage() {} + +func (x *Bucket_Website) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. +func (*Bucket_Website) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} +} + +func (x *Bucket_Website) GetMainPageSuffix() string { + if x != nil { + return x.MainPageSuffix + } + return "" +} + +func (x *Bucket_Website) GetNotFoundPage() string { + if x != nil { + return x.NotFoundPage + } + return "" +} + +// Configuration for Custom Dual Regions. It should specify precisely two +// eligible regions within the same Multiregion. 
More information on regions +// may be found [https://cloud.google.com/storage/docs/locations][here]. +type Bucket_CustomPlacementConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of locations to use for data placement. + DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"` +} + +func (x *Bucket_CustomPlacementConfig) Reset() { + *x = Bucket_CustomPlacementConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_CustomPlacementConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_CustomPlacementConfig) ProtoMessage() {} + +func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. +func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} +} + +func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { + if x != nil { + return x.DataLocations + } + return nil +} + +// Configuration for a bucket's Autoclass feature. +type Bucket_Autoclass struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables Autoclass. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Output only. Latest instant at which the `enabled` field was set to true + // after being disabled/unconfigured or set to false after being enabled. If + // Autoclass is enabled when the bucket is created, the toggle_time is set + // to the bucket creation time. + ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + // An object in an Autoclass bucket will eventually cool down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"` + // Output only. Latest instant at which the autoclass terminal storage class + // was updated. 
+ TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"` +} + +func (x *Bucket_Autoclass) Reset() { + *x = Bucket_Autoclass{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Autoclass) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Autoclass) ProtoMessage() {} + +func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. +func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} +} + +func (x *Bucket_Autoclass) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { + if x != nil { + return x.ToggleTime + } + return nil +} + +func (x *Bucket_Autoclass) GetTerminalStorageClass() string { + if x != nil && x.TerminalStorageClass != nil { + return *x.TerminalStorageClass + } + return "" +} + +func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.TerminalStorageClassUpdateTime + } + return nil +} + +// Configuration for a bucket's hierarchical namespace feature. +type Bucket_HierarchicalNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Enables the hierarchical namespace feature. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_HierarchicalNamespace) Reset() { + *x = Bucket_HierarchicalNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_HierarchicalNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_HierarchicalNamespace) ProtoMessage() {} + +func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. +func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} +} + +func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +// Settings for Uniform Bucket level access. +// See https://cloud.google.com/storage/docs/uniform-bucket-level-access. 
+type Bucket_IamConfig_UniformBucketLevelAccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If set, access checks only use bucket-level IAM policies or above. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // The deadline time for changing + // `iam_config.uniform_bucket_level_access.enabled` from `true` to + // `false`. Mutable until the specified deadline is reached, but not + // afterward. + LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { + *x = Bucket_IamConfig_UniformBucketLevelAccess{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. +func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { + if x != nil { + return x.LockTime + } + return nil +} + +// A lifecycle Rule, combining an action to take on an object and a +// condition which will trigger that action. +type Bucket_Lifecycle_Rule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The action to take. + Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` + // The condition(s) under which the action will be taken. + Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule) Reset() { + *x = Bucket_Lifecycle_Rule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. 
+func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} +} + +func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { + if x != nil { + return x.Action + } + return nil +} + +func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition { + if x != nil { + return x.Condition + } + return nil +} + +// An action to take on an object. +type Bucket_Lifecycle_Rule_Action struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of the action. Currently, only `Delete`, `SetStorageClass`, and + // `AbortIncompleteMultipartUpload` are supported. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Target storage class. Required iff the type of the action is + // SetStorageClass. + StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Action) Reset() { + *x = Bucket_Lifecycle_Rule_Action{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Action) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. +func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} +} + +func (x *Bucket_Lifecycle_Rule_Action) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +// A condition of an object which triggers some action. +type Bucket_Lifecycle_Rule_Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + // A value of 0 indicates that all objects immediately match this + // condition. + AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"` + // This condition is satisfied when an object is created before midnight + // of the specified date in UTC. + CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"` + // Relevant only for versioned objects. If the value is + // `true`, this condition matches live objects; if the value + // is `false`, it matches archived objects. + IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"` + // Relevant only for versioned objects. If the value is N, this + // condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. 
+ NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"` + // Objects having any of the storage classes specified by this condition + // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, + // `NEARLINE`, `COLDLINE`, `STANDARD`, and + // `DURABLE_REDUCED_AVAILABILITY`. + MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"` + // Number of days that have elapsed since the custom timestamp set on an + // object. + // The value of the field must be a nonnegative integer. + DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"` + // An object matches this condition if the custom timestamp set on the + // object is before the specified date in UTC. + CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if these many days have been + // passed since it became noncurrent. The value of the field must be a + // nonnegative integer. If it's zero, the object version will become + // eligible for Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"` + // This condition is relevant only for versioned objects. An object + // version satisfies this condition only if it became noncurrent before + // the specified date in UTC. + NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"` + // List of object name prefixes. If any prefix exactly matches the + // beginning of the object name, the condition evaluates to true. + MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"` + // List of object name suffixes. If any suffix exactly matches the + // end of the object name, the condition evaluates to true. + MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"` +} + +func (x *Bucket_Lifecycle_Rule_Condition) Reset() { + *x = Bucket_Lifecycle_Rule_Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_Lifecycle_Rule_Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} + +func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. 
+func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { + if x != nil && x.AgeDays != nil { + return *x.AgeDays + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date { + if x != nil { + return x.CreatedBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool { + if x != nil && x.IsLive != nil { + return *x.IsLive + } + return false +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 { + if x != nil && x.NumNewerVersions != nil { + return *x.NumNewerVersions + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string { + if x != nil { + return x.MatchesStorageClass + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 { + if x != nil && x.DaysSinceCustomTime != nil { + return *x.DaysSinceCustomTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date { + if x != nil { + return x.CustomTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 { + if x != nil && x.DaysSinceNoncurrentTime != nil { + return *x.DaysSinceNoncurrentTime + } + return 0 +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date { + if x != nil { + return x.NoncurrentTimeBefore + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string { + if x != nil { + return x.MatchesPrefix + } + return nil +} + +func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string { + if x != nil { + return x.MatchesSuffix + } + return nil +} + +var File_google_storage_v2_storage_proto protoreflect.FileDescriptor + +var file_google_storage_v2_storage_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 
0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 
0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, + 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, + 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, + 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, + 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, + 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, + 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, + 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 
0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, + 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, + 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 
0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 
0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, + 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, + 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 
0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, + 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, + 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, + 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x5f, 
0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, + 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 
0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, + 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 
0x68, 0x88, 0x01, + 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, + 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, + 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, + 0x63, 
0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, + 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, + 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, + 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 
0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 
0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, + 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, + 0x61, 
0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, + 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x42, 0x03, 0xe0, 0x41, 0x02, 
0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, + 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, + 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, + 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, + 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, + 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, + 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, + 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, + 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, + 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, + 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, + 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, + 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, + 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 
0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, + 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, + 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, + 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, + 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, + 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, + 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, + 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, + 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, + 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 
0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, + 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, + 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 
0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, + 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, + 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, + 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x17, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, + 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, + 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, + 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, + 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, + 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, + 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, + 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, + 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, + 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, + 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, + 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, + 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, + 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 
0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, + 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, + 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, + 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 
0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, + 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, + 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, + 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, + 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 
0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, + 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, + 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, + 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 
0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, + 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, + 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, + 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, + 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 
0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, + 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, + 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 
0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, + 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, + 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, + 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, + 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, + 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, + 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 
0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, + 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, + 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, + 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, + 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, + 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 
0x2a, 0x2a, 0x7d, + 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, + 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, + 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, + 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, + 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, + 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 
0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, + 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, + 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, + 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 
0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, + 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, + 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, + 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, + 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, + 0x11, 0x61, 0x63, 0x63, 0x65, 
0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, + 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, + 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, + 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_storage_v2_storage_proto_rawDescOnce sync.Once + file_google_storage_v2_storage_proto_rawDescData = file_google_storage_v2_storage_proto_rawDesc +) + +func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { + file_google_storage_v2_storage_proto_rawDescOnce.Do(func() { + file_google_storage_v2_storage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData) + }) + return file_google_storage_v2_storage_proto_rawDescData +} + +var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) +var file_google_storage_v2_storage_proto_goTypes = []any{ + (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values + (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest + (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest + (*CreateBucketRequest)(nil), // 3: google.storage.v2.CreateBucketRequest + (*ListBucketsRequest)(nil), // 4: google.storage.v2.ListBucketsRequest + (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse + (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest + (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest + (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest + (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest + (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest + (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest + (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse + (*ComposeObjectRequest)(nil), // 13: 
google.storage.v2.ComposeObjectRequest + (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest + (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 44: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata + (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig + (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption + (*Object)(nil), // 51: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount + (*Owner)(nil), // 56: google.storage.v2.Owner + (*ContentRange)(nil), // 57: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: 
google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass + (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 73: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 79: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 82: google.protobuf.Duration + (*date.Date)(nil), // 83: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 87: google.protobuf.Empty + (*iampb.Policy)(nil), // 88: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse +} +var file_google_storage_v2_storage_proto_depIdxs = []int32{ + 80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig + 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig + 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> 
google.storage.v2.CommonObjectRequestParams + 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 42, // 41: 
google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 81, // 75: 
google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> 
google.storage.v2.LockBucketRetentionPolicyRequest + 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 88, // 140: 
google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 135, // [135:167] is the sub-list for method output_type + 103, // [103:135] is the sub-list for method input_type + 103, // [103:103] is the sub-list for extension type_name + 103, // [103:103] is the sub-list for extension extendee + 0, // [0:103] is the sub-list for field type_name +} + +func init() { file_google_storage_v2_storage_proto_init() } +func file_google_storage_v2_storage_proto_init() { + if File_google_storage_v2_storage_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DeleteBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CreateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListBucketsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListBucketsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*LockBucketRetentionPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UpdateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*DeleteNotificationConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*GetNotificationConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*CreateNotificationConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationConfigsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*ListNotificationConfigsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*DeleteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := 
v.(*RestoreObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*CancelResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*CancelResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*ReadObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*GetObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*ReadObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*WriteObjectSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*WriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*WriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*BidiWriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*BidiWriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*QueryWriteStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*QueryWriteStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*RewriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*RewriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*StartResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { + switch v := v.(*StartResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { + switch v := v.(*UpdateObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { + switch v := v.(*GetServiceAccountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { + switch v := v.(*CreateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { + switch v := v.(*CreateHmacKeyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { + switch v := v.(*DeleteHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { + switch v := v.(*GetHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { + switch v := v.(*ListHmacKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { + switch v := v.(*ListHmacKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { + switch v := v.(*UpdateHmacKeyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { + switch v := v.(*CommonObjectRequestParams); i { + case 0: 
+ return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { + switch v := v.(*ServiceConstants); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { + switch v := v.(*BucketAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { + switch v := v.(*ChecksummedData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { + switch v := v.(*ObjectChecksums); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { + switch v := v.(*HmacKeyMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { + switch v := v.(*NotificationConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { + switch v := v.(*CustomerEncryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { + switch v := v.(*ObjectAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { + switch v := v.(*ProjectTeam); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { + switch v := v.(*ServiceAccount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { + switch v := 
v.(*Owner); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any { + switch v := v.(*ContentRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest_SourceObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Billing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Cors); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Encryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_IamConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Logging); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_RetentionPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_SoftDeletePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Versioning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Website); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_CustomPlacementConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_HierarchicalNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{ + (*WriteObjectRequest_UploadId)(nil), + (*WriteObjectRequest_WriteObjectSpec)(nil), + (*WriteObjectRequest_ChecksummedData)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ + (*WriteObjectResponse_PersistedSize)(nil), + (*WriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{ + (*BidiWriteObjectRequest_UploadId)(nil), + (*BidiWriteObjectRequest_WriteObjectSpec)(nil), + (*BidiWriteObjectRequest_ChecksummedData)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{ + (*BidiWriteObjectResponse_PersistedSize)(nil), + (*BidiWriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = 
[]any{ + (*QueryWriteStatusResponse_PersistedSize)(nil), + (*QueryWriteStatusResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, + NumEnums: 1, + NumMessages: 79, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_storage_v2_storage_proto_goTypes, + DependencyIndexes: file_google_storage_v2_storage_proto_depIdxs, + EnumInfos: file_google_storage_v2_storage_proto_enumTypes, + MessageInfos: file_google_storage_v2_storage_proto_msgTypes, + }.Build() + File_google_storage_v2_storage_proto = out.File + file_google_storage_v2_storage_proto_rawDesc = nil + file_google_storage_v2_storage_proto_goTypes = nil + file_google_storage_v2_storage_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// StorageClient is the client API for Storage service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StorageClient interface { + // Permanently deletes an empty bucket. + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Creates a new bucket. + CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) + // Gets the IAM policy for a specified bucket. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}`. + GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) + // Updates an IAM policy for the specified bucket. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}`. + SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. 
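The IAM methods above take the bucket's resource name in the `projects/_/buckets/{bucket}` form. The following is a minimal, hypothetical sketch of fetching and printing a bucket policy; it is written as if it lived in the same package as this generated file (the package name `storagepb` is an assumption), and `cc` is assumed to be an already-authenticated gRPC connection to the Storage v2 endpoint.

```
// Hypothetical usage sketch; the bucket name and package name are assumptions.
package storagepb

import (
	"context"
	"fmt"

	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/grpc"
)

// printBucketPolicy fetches the IAM policy for an (assumed) bucket and
// prints each role binding.
func printBucketPolicy(ctx context.Context, cc grpc.ClientConnInterface) error {
	client := NewStorageClient(cc)
	policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: "projects/_/buckets/example-bucket", // assumed bucket
	})
	if err != nil {
		return err
	}
	for _, b := range policy.GetBindings() {
		fmt.Printf("%s: %v\n", b.GetRole(), b.GetMembers())
	}
	return nil
}
```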
+ // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // View a NotificationConfig. + GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. + CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Reads an object's data. + ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. 
Additionally, the final message must set 'finish_write' to
+	// true, or else it is an error.
+	//
+	// For a resumable write, the client should instead call
+	// `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
+	// They should then attach the returned `upload_id` to the first message of
+	// each following call to `WriteObject`. If the stream is closed before
+	// finishing the upload (either explicitly by the client or due to a network
+	// error or an error response from the server), the client should do as
+	// follows:
+	//   - Check the result Status of the stream, to determine if writing can be
+	//     resumed on this stream or must be restarted from scratch (by calling
+	//     `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED,
+	//     INTERNAL, and UNAVAILABLE. For each case, the client should use binary
+	//     exponential backoff before retrying. Additionally, writes can be
+	//     resumed after RESOURCE_EXHAUSTED errors, but only after taking
+	//     appropriate measures, which may include reducing aggregate send rate
+	//     across clients and/or requesting a quota increase for your project.
+	//   - If the call to `WriteObject` returns `ABORTED`, that indicates
+	//     concurrent attempts to update the resumable write, caused either by
+	//     multiple racing clients or by a single client where the previous
+	//     request was timed out on the client side but nonetheless reached the
+	//     server. In this case the client should take steps to prevent further
+	//     concurrent writes (e.g., increase the timeouts, stop using more than
+	//     one process to perform the upload, etc.), and then should follow the
+	//     steps below for resuming the upload.
+	//   - For resumable errors, the client should call `QueryWriteStatus()` and
+	//     then continue writing from the returned `persisted_size`. This may be
+	//     less than the amount of data the client previously sent. Note also that
+	//     it is acceptable to send data starting at an offset earlier than the
+	//     returned `persisted_size`; in this case, the service will skip data at
+	//     offsets that were already persisted (without checking that it matches
+	//     the previously written data), and write only the data starting from the
+	//     persisted offset. Even though the data isn't written, it may still
+	//     incur a performance cost over resuming at the correct write offset.
+	//     This behavior can make client-side handling simpler in some cases.
+	//   - Clients must only send data that is a multiple of 256 KiB per message,
+	//     unless the object is being finished with `finish_write` set to `true`.
+	//
+	// The service will not view the object as complete until the client has
+	// sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
+	// requests on a stream after sending a request with `finish_write` set to
+	// `true` will cause an error. The client **should** check the response it
+	// receives to determine how much data the service was able to commit and
+	// whether the service views the object as complete.
+	//
+	// Attempting to resume an already finalized object will result in an OK
+	// status, with a WriteObjectResponse containing the finalized object's
+	// metadata.
+	//
+	// Alternatively, the BidiWriteObject operation may be used to write an
+	// object with controls over flushing and the ability to determine the
+	// current persisted size.
+	WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error)
+	// Stores a new object and metadata.
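To make the single-stream `WriteObject` path described above concrete, here is a minimal sketch of a one-message upload. It is written as if it lived in the same package as this generated file (the package name `storagepb` is an assumption), `cc` is assumed to be an authenticated gRPC connection, `data` is assumed small enough to fit in one request, and the bucket and object names are placeholders.

```
// Hypothetical sketch of the single-stream write flow: the only message
// carries the WriteObjectSpec, the data, and finish_write=true.
package storagepb

import (
	"context"

	"google.golang.org/grpc"
)

func uploadSmallObject(ctx context.Context, cc grpc.ClientConnInterface, data []byte) (*WriteObjectResponse, error) {
	stream, err := NewStorageClient(cc).WriteObject(ctx)
	if err != nil {
		return nil, err
	}
	req := &WriteObjectRequest{
		// The first message must identify the destination object.
		FirstMessage: &WriteObjectRequest_WriteObjectSpec{
			WriteObjectSpec: &WriteObjectSpec{
				Resource: &Object{
					Bucket: "projects/_/buckets/example-bucket", // assumed bucket
					Name:   "example-object",                    // assumed object name
				},
			},
		},
		Data: &WriteObjectRequest_ChecksummedData{
			ChecksummedData: &ChecksummedData{Content: data},
		},
		WriteOffset: 0,
		FinishWrite: true, // single message, so finish immediately
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	// CloseAndRecv reports how much data was committed and the final object.
	return stream.CloseAndRecv()
}
```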
+ // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) + // Retrieves a list of objects matching the criteria. + ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. Key must be in an INACTIVE state. + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. 
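The `flush`/`state_lookup` interplay described above can be sketched as follows, under the same assumptions (same package, authenticated connection); `uploadID` would come from an earlier `StartResumableWrite` call, and `chunk` is assumed to be a multiple of 256 KiB unless it is the final chunk.

```
// Hypothetical sketch: write one chunk over BidiWriteObject, ask the
// service to flush it and report the persisted size.
package storagepb

import (
	"context"

	"google.golang.org/grpc"
)

func writeChunkWithFlush(ctx context.Context, cc grpc.ClientConnInterface, uploadID string, offset int64, chunk []byte) (int64, error) {
	stream, err := NewStorageClient(cc).BidiWriteObject(ctx)
	if err != nil {
		return 0, err
	}
	err = stream.Send(&BidiWriteObjectRequest{
		FirstMessage: &BidiWriteObjectRequest_UploadId{UploadId: uploadID},
		WriteOffset:  offset,
		Data: &BidiWriteObjectRequest_ChecksummedData{
			ChecksummedData: &ChecksummedData{Content: chunk},
		},
		Flush:       true, // persist what has been written so far (assumed field name)
		StateLookup: true, // ask for a response carrying persisted_size (assumed field name)
	})
	if err != nil {
		return 0, err
	}
	resp, err := stream.Recv()
	if err != nil {
		return 0, err
	}
	return resp.GetPersistedSize(), nil
}
```

After an error on either write path, a client would typically call `QueryWriteStatus` with the same `upload_id` and resume sending from the returned `persisted_size`, as the comments above describe.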
+ UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) +} + +type storageClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { + out := new(ListBucketsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { + out := new(iampb.TestIamPermissionsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { + out := new(Bucket) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { + out := new(ListNotificationConfigsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RestoreObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { + out := new(CancelResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[0], "/google.storage.v2.Storage/ReadObject", opts...) 
+ if err != nil { + return nil, err + } + x := &storageReadObjectClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Storage_ReadObjectClient interface { + Recv() (*ReadObjectResponse, error) + grpc.ClientStream +} + +type storageReadObjectClient struct { + grpc.ClientStream +} + +func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { + m := new(ReadObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageWriteObjectClient{stream} + return x, nil +} + +type Storage_WriteObjectClient interface { + Send(*WriteObjectRequest) error + CloseAndRecv() (*WriteObjectResponse, error) + grpc.ClientStream +} + +type storageWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageWriteObjectClient) Send(m *WriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(WriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageBidiWriteObjectClient{stream} + return x, nil +} + +type Storage_BidiWriteObjectClient interface { + Send(*BidiWriteObjectRequest) error + Recv() (*BidiWriteObjectResponse, error) + grpc.ClientStream +} + +type storageBidiWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageBidiWriteObjectClient) Send(m *BidiWriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectClient) Recv() (*BidiWriteObjectResponse, error) { + m := new(BidiWriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { + out := new(ListObjectsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) { + out := new(RewriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RewriteObject", in, out, opts...) 
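The server-streaming read implemented by `storageReadObjectClient` above reduces to a `Recv` loop that ends at `io.EOF`. A hedged sketch, under the same package and connection assumptions, that drains an object into memory:

```
// Hypothetical sketch; bucket/object arguments are caller-supplied and the
// package name is an assumption.
package storagepb

import (
	"context"
	"errors"
	"io"

	"google.golang.org/grpc"
)

func readWholeObject(ctx context.Context, cc grpc.ClientConnInterface, bucket, object string) ([]byte, error) {
	stream, err := NewStorageClient(cc).ReadObject(ctx, &ReadObjectRequest{
		Bucket: bucket, // e.g. "projects/_/buckets/example-bucket"
		Object: object,
	})
	if err != nil {
		return nil, err
	}
	var buf []byte
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return buf, nil // server finished streaming the object
		}
		if err != nil {
			return nil, err
		}
		buf = append(buf, resp.GetChecksummedData().GetContent()...)
	}
}
```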
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) { + out := new(StartResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/StartResumableWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) { + out := new(QueryWriteStatusResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/QueryWriteStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { + out := new(ServiceAccount) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { + out := new(CreateHmacKeyResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { + out := new(ListHmacKeysResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { + out := new(HmacKeyMetadata) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageServer is the server API for Storage service. +type StorageServer interface { + // Permanently deletes an empty bucket. + DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) + // Returns metadata for the specified bucket. + GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) + // Creates a new bucket. + CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) + // Retrieves a list of buckets for a given project. + ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) + // Locks retention policy on a bucket. + LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) + // Gets the IAM policy for a specified bucket. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}`. 
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) + // Updates an IAM policy for the specified bucket. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}`. + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) + // Tests a set of permissions on the given bucket or object to see which, if + // any, are held by the caller. + // The `resource` field in the request should be + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) + // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. + UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) + // View a NotificationConfig. + GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) + // Concatenates a list of existing objects into a new object in the same + // bucket. + ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. + DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) + // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. + CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) + // Retrieves an object's metadata. + GetObject(context.Context, *GetObjectRequest) (*Object, error) + // Reads an object's data. + ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error + // Updates an object's metadata. + // Equivalent to JSON API's storage.objects.patch. + UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) + // Stores a new object and metadata. + // + // An object can be written either in a single message stream or in a + // resumable sequence of message streams. To write using a single stream, + // the client should include in the first message of the stream an + // `WriteObjectSpec` describing the destination bucket, object, and any + // preconditions. 
Additionally, the final message must set 'finish_write' to
+	// true, or else it is an error.
+	//
+	// For a resumable write, the client should instead call
+	// `StartResumableWrite()`, populating a `WriteObjectSpec` into that request.
+	// They should then attach the returned `upload_id` to the first message of
+	// each following call to `WriteObject`. If the stream is closed before
+	// finishing the upload (either explicitly by the client or due to a network
+	// error or an error response from the server), the client should do as
+	// follows:
+	//   - Check the result Status of the stream, to determine if writing can be
+	//     resumed on this stream or must be restarted from scratch (by calling
+	//     `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED,
+	//     INTERNAL, and UNAVAILABLE. For each case, the client should use binary
+	//     exponential backoff before retrying. Additionally, writes can be
+	//     resumed after RESOURCE_EXHAUSTED errors, but only after taking
+	//     appropriate measures, which may include reducing aggregate send rate
+	//     across clients and/or requesting a quota increase for your project.
+	//   - If the call to `WriteObject` returns `ABORTED`, that indicates
+	//     concurrent attempts to update the resumable write, caused either by
+	//     multiple racing clients or by a single client where the previous
+	//     request was timed out on the client side but nonetheless reached the
+	//     server. In this case the client should take steps to prevent further
+	//     concurrent writes (e.g., increase the timeouts, stop using more than
+	//     one process to perform the upload, etc.), and then should follow the
+	//     steps below for resuming the upload.
+	//   - For resumable errors, the client should call `QueryWriteStatus()` and
+	//     then continue writing from the returned `persisted_size`. This may be
+	//     less than the amount of data the client previously sent. Note also that
+	//     it is acceptable to send data starting at an offset earlier than the
+	//     returned `persisted_size`; in this case, the service will skip data at
+	//     offsets that were already persisted (without checking that it matches
+	//     the previously written data), and write only the data starting from the
+	//     persisted offset. Even though the data isn't written, it may still
+	//     incur a performance cost over resuming at the correct write offset.
+	//     This behavior can make client-side handling simpler in some cases.
+	//   - Clients must only send data that is a multiple of 256 KiB per message,
+	//     unless the object is being finished with `finish_write` set to `true`.
+	//
+	// The service will not view the object as complete until the client has
+	// sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any
+	// requests on a stream after sending a request with `finish_write` set to
+	// `true` will cause an error. The client **should** check the response it
+	// receives to determine how much data the service was able to commit and
+	// whether the service views the object as complete.
+	//
+	// Attempting to resume an already finalized object will result in an OK
+	// status, with a WriteObjectResponse containing the finalized object's
+	// metadata.
+	//
+	// Alternatively, the BidiWriteObject operation may be used to write an
+	// object with controls over flushing and the ability to determine the
+	// current persisted size.
+	WriteObject(Storage_WriteObjectServer) error
+	// Stores a new object and metadata.
+ // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(Storage_BidiWriteObjectServer) error + // Retrieves a list of objects matching the criteria. + ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) + // Rewrites a source object to a destination object. Optionally overrides + // metadata. + RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) + // Retrieves the name of a project's Google Cloud Storage service account. + GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) + // Creates a new HMAC key for the given service account. + CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) + // Deletes a given HMAC key. Key must be in an INACTIVE state. + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) + // Gets an existing HMAC key metadata for the given id. + GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) + // Lists HMAC keys under a given project with the additional filters provided. + ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) + // Updates a given HMAC key state between ACTIVE and INACTIVE. + UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) +} + +// UnimplementedStorageServer can be embedded to have forward compatible implementations. 
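As the comment above notes, embedding `UnimplementedStorageServer` keeps a partial implementation compiling when new RPCs are added to the service. A hypothetical sketch with a toy in-memory store (type and field names are illustrative, package name assumed), overriding only `GetObject`:

```
// Hypothetical partial server; everything not overridden falls through to
// UnimplementedStorageServer and returns codes.Unimplemented.
package storagepb

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type inMemoryStorageServer struct {
	UnimplementedStorageServer

	objects map[string]*Object // toy backing store keyed by object name
}

func (s *inMemoryStorageServer) GetObject(ctx context.Context, req *GetObjectRequest) (*Object, error) {
	if obj, ok := s.objects[req.GetObject()]; ok {
		return obj, nil
	}
	return nil, status.Errorf(codes.NotFound, "object %q not found", req.GetObject())
}
```

Such a type would then be passed to `RegisterStorageServer` (defined further below) on a `*grpc.Server`.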
+type UnimplementedStorageServer struct { +} + +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") +} +func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") +} +func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") +} +func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") +} +func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") +} +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} +func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") +} +func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") +} +func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") +} +func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") +} +func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") +} +func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") +} +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} +func 
(*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") +} +func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { + return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") +} +func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") +} +func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") +} +func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") +} +func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") +} +func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") +} +func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") +} +func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") +} +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") +} +func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") +} +func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") +} +func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") +} + +func RegisterStorageServer(s *grpc.Server, srv StorageServer) { + s.RegisterService(&_Storage_serviceDesc, srv) +} + +func _Storage_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteBucket(ctx, req.(*DeleteBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetBucket(ctx, req.(*GetBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateBucket(ctx, req.(*CreateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBucketsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListBuckets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListBuckets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListBuckets(ctx, req.(*ListBucketsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockBucketRetentionPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/LockBucketRetentionPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).LockBucketRetentionPolicy(ctx, req.(*LockBucketRetentionPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(iampb.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetIamPolicy(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(iampb.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(iampb.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBucketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateBucket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateBucket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateBucket(ctx, req.(*UpdateBucketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteNotificationConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetNotificationConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateNotificationConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListNotificationConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ComposeObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ComposeObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ComposeObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ComposeObject(ctx, req.(*ComposeObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteObject(ctx, req.(*DeleteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RestoreObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RestoreObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageServer).RestoreObject(ctx, req.(*RestoreObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CancelResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetObject(ctx, req.(*GetObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadObjectRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageServer).ReadObject(m, &storageReadObjectServer{stream}) +} + +type Storage_ReadObjectServer interface { + Send(*ReadObjectResponse) error + grpc.ServerStream +} + +type storageReadObjectServer struct { + grpc.ServerStream +} + +func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).UpdateObject(ctx, req.(*UpdateObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_WriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).WriteObject(&storageWriteObjectServer{stream}) +} + +type Storage_WriteObjectServer interface { + SendAndClose(*WriteObjectResponse) error + Recv() (*WriteObjectRequest, error) + grpc.ServerStream +} + +type storageWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageWriteObjectServer) SendAndClose(m *WriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { + m := new(WriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Storage_BidiWriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).BidiWriteObject(&storageBidiWriteObjectServer{stream}) +} + +type 
Storage_BidiWriteObjectServer interface { + Send(*BidiWriteObjectResponse) error + Recv() (*BidiWriteObjectRequest, error) + grpc.ServerStream +} + +type storageBidiWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageBidiWriteObjectServer) Send(m *BidiWriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectServer) Recv() (*BidiWriteObjectRequest, error) { + m := new(BidiWriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListObjectsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListObjects(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListObjects", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListObjects(ctx, req.(*ListObjectsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_RewriteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RewriteObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RewriteObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RewriteObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RewriteObject(ctx, req.(*RewriteObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_StartResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).StartResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/StartResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).StartResumableWrite(ctx, req.(*StartResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryWriteStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).QueryWriteStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/QueryWriteStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(StorageServer).GetServiceAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetServiceAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).CreateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CreateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).DeleteHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).GetHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/GetHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListHmacKeysRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).ListHmacKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/ListHmacKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateHmacKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).UpdateHmacKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Storage_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.storage.v2.Storage", + HandlerType: (*StorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteBucket", + Handler: _Storage_DeleteBucket_Handler, + }, + { + MethodName: "GetBucket", + Handler: _Storage_GetBucket_Handler, + }, + { + MethodName: "CreateBucket", + Handler: _Storage_CreateBucket_Handler, + }, + { + MethodName: "ListBuckets", + Handler: _Storage_ListBuckets_Handler, + }, + { + MethodName: "LockBucketRetentionPolicy", + Handler: _Storage_LockBucketRetentionPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _Storage_GetIamPolicy_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _Storage_SetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _Storage_TestIamPermissions_Handler, + }, + { + MethodName: "UpdateBucket", + Handler: _Storage_UpdateBucket_Handler, + }, + { + MethodName: "DeleteNotificationConfig", + Handler: _Storage_DeleteNotificationConfig_Handler, + }, + { + MethodName: "GetNotificationConfig", + Handler: _Storage_GetNotificationConfig_Handler, + }, + { + MethodName: "CreateNotificationConfig", + Handler: _Storage_CreateNotificationConfig_Handler, + }, + { + MethodName: "ListNotificationConfigs", + Handler: _Storage_ListNotificationConfigs_Handler, + }, + { + MethodName: "ComposeObject", + Handler: _Storage_ComposeObject_Handler, + }, + { + MethodName: "DeleteObject", + Handler: _Storage_DeleteObject_Handler, + }, + { + MethodName: "RestoreObject", + Handler: _Storage_RestoreObject_Handler, + }, + { + MethodName: "CancelResumableWrite", + Handler: _Storage_CancelResumableWrite_Handler, + }, + { + MethodName: "GetObject", + Handler: _Storage_GetObject_Handler, + }, + { + MethodName: "UpdateObject", + Handler: _Storage_UpdateObject_Handler, + }, + { + MethodName: "ListObjects", + Handler: _Storage_ListObjects_Handler, + }, + { + MethodName: "RewriteObject", + Handler: _Storage_RewriteObject_Handler, + }, + { + MethodName: "StartResumableWrite", + Handler: _Storage_StartResumableWrite_Handler, + }, + { + MethodName: "QueryWriteStatus", + Handler: _Storage_QueryWriteStatus_Handler, + }, + { + MethodName: "GetServiceAccount", + Handler: _Storage_GetServiceAccount_Handler, + }, + { + MethodName: "CreateHmacKey", + Handler: _Storage_CreateHmacKey_Handler, + }, + { + MethodName: "DeleteHmacKey", + Handler: _Storage_DeleteHmacKey_Handler, + }, + { + MethodName: "GetHmacKey", + Handler: _Storage_GetHmacKey_Handler, + }, + { + MethodName: "ListHmacKeys", + Handler: _Storage_ListHmacKeys_Handler, + }, + { + MethodName: "UpdateHmacKey", + Handler: _Storage_UpdateHmacKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ReadObject", + Handler: _Storage_ReadObject_Handler, + ServerStreams: true, + }, + { + StreamName: "WriteObject", + Handler: _Storage_WriteObject_Handler, + ClientStreams: true, + }, + { + StreamName: "BidiWriteObject", + Handler: _Storage_BidiWriteObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/storage/v2/storage.proto", +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/version.go b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go new file mode 100644 index 000000000..15920f3f6 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package storage + +import "cloud.google.com/go/storage/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go new file mode 100644 index 000000000..e5b2de091 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.43.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 000000000..de57b4bbb --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,157 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/url" + "strings" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + sinternal "cloud.google.com/go/storage/internal" + "github.com/google/uuid" + gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var defaultRetry *retryConfig = &retryConfig{} +var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) + +const ( + xGoogHeaderKey = "x-goog-api-client" + idempotencyHeaderKey = "x-goog-gcs-idempotency-token" +) + +// run determines whether a retry is necessary based on the config and +// idempotency information. It then calls the function with or without retries +// as appropriate, using the configured settings. 
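Before the implementation that follows: `run` is internal, but it is the machinery the package's exported retry options feed into. A minimal sketch of tuning it from the caller's side, assuming the exported `Retryer`, `WithBackoff`, `WithPolicy`, and `WithErrorFunc` options defined elsewhere in this package, and placeholder bucket/object names:

```
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Retry every call on this handle (not just idempotent ones), with a
	// custom backoff, deferring the retry decision to ShouldRetry.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second, Max: 30 * time.Second, Multiplier: 3}),
		storage.WithPolicy(storage.RetryAlways),
		storage.WithErrorFunc(storage.ShouldRetry),
	)

	if _, err := obj.Attrs(ctx); err != nil {
		log.Fatal(err)
	}
}
```

Calls made through `obj` are then routed through `run` with the resulting retry configuration.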
+func run(ctx context.Context, call func(ctx context.Context) error, retry *retryConfig, isIdempotent bool) error { + attempts := 1 + invocationID := uuid.New().String() + + if retry == nil { + retry = defaultRetry + } + if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + return call(ctxWithHeaders) + } + bo := gax.Backoff{} + if retry.backoff != nil { + bo.Multiplier = retry.backoff.Multiplier + bo.Initial = retry.backoff.Initial + bo.Max = retry.backoff.Max + } + var errorFunc func(err error) bool = ShouldRetry + if retry.shouldRetry != nil { + errorFunc = retry.shouldRetry + } + + return internal.Retry(ctx, bo, func() (stop bool, err error) { + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + err = call(ctxWithHeaders) + if err != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err) + } + attempts++ + return !errorFunc(err), err + }) +} + +// Sets invocation ID headers on the context which will be propagated as +// headers in the call to the service (for both gRPC and HTTP). +func setInvocationHeaders(ctx context.Context, invocationID string, attempts int) context.Context { + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") + + // TODO: remove this once the respective transport packages merge xGoogHeader. + // Also remove gl-go at that time, as it will be repeated. + hdrs := callctx.HeadersFromContext(ctx) + for _, v := range hdrs[xGoogHeaderKey] { + xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ") + } + + if hdrs[xGoogHeaderKey] != nil { + // Replace the key instead of adding it, if there was anything to merge with. + hdrs[xGoogHeaderKey] = []string{xGoogHeader} + } else { + // TODO: keep this line when removing the above code. + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + } + + ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) + return ctx +} + +// ShouldRetry returns true if an error is retryable, based on best practice +// guidance from GCS. See +// https://cloud.google.com/storage/docs/retry-strategy#go for more information +// on what errors are considered retryable. +// +// If you would like to customize retryable errors, use the WithErrorFunc to +// supply a RetryOption to your library calls. For example, to retry additional +// errors, you can write a custom func that wraps ShouldRetry and also specifies +// additional errors that should return true. +func ShouldRetry(err error) bool { + if err == nil { + return false + } + if errors.Is(err, io.ErrUnexpectedEOF) { + return true + } + if errors.Is(err, net.ErrClosed) { + return true + } + + switch e := err.(type) { + case *googleapi.Error: + // Retry on 408, 429, and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. 
+ retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + case interface{ Temporary() bool }: + if e.Temporary() { + return true + } + } + // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. + if st, ok := status.FromError(err); ok { + if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { + return true + } + } + // Unwrap is only supported in go1.13.x+ + if e, ok := err.(interface{ Unwrap() error }); ok { + return ShouldRetry(e.Unwrap()) + } + return false +} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 000000000..1d6cfdf59 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,200 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "regexp" + + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/storage/internal/apiv2/storagepb" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. +type Notification struct { + //The ID of the notification. + ID string + + // The ID of the topic to which this subscription publishes. + TopicID string + + // The ID of the project to which the topic belongs. + TopicProjectID string + + // Only send notifications about listed event types. If empty, send notifications + // for all event types. + // See https://cloud.google.com/storage/docs/pubsub-notifications#events. + EventTypes []string + + // If present, only apply this notification configuration to object names that + // begin with this prefix. + ObjectNamePrefix string + + // An optional list of additional attributes to attach to each Cloud PubSub + // message published for this notification subscription. + CustomAttributes map[string]string + + // The contents of the message payload. + // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. + PayloadFormat string +} + +// Values for Notification.PayloadFormat. +const ( + // Send no payload with notification messages. + NoPayload = "NONE" + + // Send object metadata as JSON with notification messages. + JSONPayload = "JSON_API_V1" +) + +// Values for Notification.EventTypes. +const ( + // Event that occurs when an object is successfully created. + ObjectFinalizeEvent = "OBJECT_FINALIZE" + + // Event that occurs when the metadata of an existing object changes. + ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" + + // Event that occurs when an object is permanently deleted. + ObjectDeleteEvent = "OBJECT_DELETE" + + // Event that occurs when the live version of an object becomes an + // archived version. 
+ ObjectArchiveEvent = "OBJECT_ARCHIVE" +) + +func toNotification(rn *raw.Notification) *Notification { + n := &Notification{ + ID: rn.Id, + EventTypes: rn.EventTypes, + ObjectNamePrefix: rn.ObjectNamePrefix, + CustomAttributes: rn.CustomAttributes, + PayloadFormat: rn.PayloadFormat, + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) + return n +} + +func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { + n := &Notification{ + ID: pbn.GetName(), + EventTypes: pbn.GetEventTypes(), + ObjectNamePrefix: pbn.GetObjectNamePrefix(), + CustomAttributes: pbn.GetCustomAttributes(), + PayloadFormat: pbn.GetPayloadFormat(), + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) + return n +} + +func toProtoNotification(n *Notification) *storagepb.NotificationConfig { + return &storagepb.NotificationConfig{ + Name: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: n.PayloadFormat, + } +} + +var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`) + +// parseNotificationTopic extracts the project and topic IDs from from the full +// resource name returned by the service. If the name is malformed, it returns +// "?" for both IDs. +func parseNotificationTopic(nt string) (projectID, topicID string) { + matches := topicRE.FindStringSubmatch(nt) + if matches == nil { + return "?", "?" + } + return matches[1], matches[2] +} + +func toRawNotification(n *Notification) *raw.Notification { + return &raw.Notification{ + Id: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: string(n.PayloadFormat), + } +} + +// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID +// and PayloadFormat, and must not set its ID. The other fields are all optional. The +// returned Notification's ID can be used to refer to it. +func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") + defer func() { trace.EndSpan(ctx, err) }() + + if n.ID != "" { + return nil, errors.New("storage: AddNotification: ID must not be set") + } + if n.TopicProjectID == "" { + return nil, errors.New("storage: AddNotification: missing TopicProjectID") + } + if n.TopicID == "" { + return nil, errors.New("storage: AddNotification: missing TopicID") + } + + opts := makeStorageOpts(false, b.retry, b.userProject) + ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) + return ret, err +} + +// Notifications returns all the Notifications configured for this bucket, as a map +// indexed by notification ID. +func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") + defer func() { trace.EndSpan(ctx, err) }() + + opts := makeStorageOpts(true, b.retry, b.userProject) + n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) 
+ return n, err +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { + m := map[string]*Notification{} + for _, n := range ns { + m[n.Name] = toNotificationFromProto(n) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + opts := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) +} diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go new file mode 100644 index 000000000..debdb0f52 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/option.go @@ -0,0 +1,80 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" +) + +// storageConfig contains the Storage client option configuration that can be +// set through storageClientOptions. +type storageConfig struct { + useJSONforReads bool + readAPIWasSet bool +} + +// newStorageConfig generates a new storageConfig with all the given +// storageClientOptions applied. +func newStorageConfig(opts ...option.ClientOption) storageConfig { + var conf storageConfig + for _, opt := range opts { + if storageOpt, ok := opt.(storageClientOption); ok { + storageOpt.ApplyStorageOpt(&conf) + } + } + return conf +} + +// A storageClientOption is an option for a Google Storage client. +type storageClientOption interface { + option.ClientOption + ApplyStorageOpt(*storageConfig) +} + +// WithJSONReads is an option that may be passed to [NewClient]. +// It sets the client to use the Cloud Storage JSON API for object +// reads. Currently, the default API used for reads is XML, but JSON will +// become the default in a future release. +// +// Setting this option is required to use the GenerationNotMatch condition. We +// also recommend using JSON reads to ensure consistency with other client +// operations (all of which use JSON by default). +// +// Note that when this option is set, reads will return a zero date for +// [ReaderObjectAttrs].LastModified and may return a different value for +// [ReaderObjectAttrs].CacheControl. +func WithJSONReads() option.ClientOption { + return &withReadAPI{useJSON: true} +} + +// WithXMLReads is an option that may be passed to [NewClient]. +// It sets the client to use the Cloud Storage XML API for object reads. +// +// This is the current default, but the default will switch to JSON in a future +// release. 
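Since the read-API choice described above is fixed at client construction, here is a minimal sketch of opting into JSON reads; it assumes only the exported `WithJSONReads` option defined here plus the package's `NewClient` and `NewReader` entry points, with placeholder bucket and object names:

```
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// Use the JSON API for object downloads so reads behave consistently
	// with the rest of the client, which already uses JSON.
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	r, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```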
+func WithXMLReads() option.ClientOption { + return &withReadAPI{useJSON: false} +} + +type withReadAPI struct { + internaloption.EmbeddableAdapter + useJSON bool +} + +func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { + c.useJSONforReads = w.useJSON + c.readAPIWasSet = true +} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go new file mode 100644 index 000000000..6bc73fb7a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -0,0 +1,443 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// PostPolicyV4Options are used to construct a signed post policy. +// Please see https://cloud.google.com/storage/docs/xml-api/post-object +// for reference about the fields. +type PostPolicyV4Options struct { + // GoogleAccessID represents the authorizer of the signed post policy generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. + // + // Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined, + // SignBytes will be ignored. + // This SignBytes function expects the bytes it receives to be hashed, while + // SignRawBytes accepts the raw bytes without hashing, allowing more flexibility. + // Add the following to the top of your signing function to hash the bytes + // to use SignRawBytes instead: + // shaSum := sha256.Sum256(bytes) + // bytes = shaSum[:] + // + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // SignRawBytes is a function for implementing custom signing. 
For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // &PostPolicyV4Options{ + // GoogleAccessID: acc, + // SignRawBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // SignRawBytes is equivalent to the SignBytes field on SignedURLOptions; + // that is, you may use the same signing function for the two. + // + // Exactly one of PrivateKey or SignRawBytes must be non-nil. + SignRawBytes func(bytes []byte) (signature []byte, err error) + + // Expires is the expiration time on the signed post policy. + // It must be a time in the future. + // Required. + Expires time.Time + + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Optional. + Style URLStyle + + // Insecure when set indicates that the generated URL's scheme + // will use "http" instead of "https" (default). + // Optional. + Insecure bool + + // Fields specifies the attributes of a PostPolicyV4 request. + // When Fields is non-nil, its attributes must match those that will + // passed into field Conditions. + // Optional. + Fields *PolicyV4Fields + + // The conditions that the uploaded file will be expected to conform to. + // When used, the failure of an upload to satisfy a condition will result in + // a 4XX status code, back with the message describing the problem. + // Optional. + Conditions []PostPolicyV4Condition + + // Hostname sets the host of the signed post policy. This field overrides + // any endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle URLStyle. + // Optional. + Hostname string + + shouldHashSignBytes bool +} + +func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { + return &PostPolicyV4Options{ + GoogleAccessID: opts.GoogleAccessID, + PrivateKey: opts.PrivateKey, + SignBytes: opts.SignBytes, + SignRawBytes: opts.SignRawBytes, + Expires: opts.Expires, + Style: opts.Style, + Insecure: opts.Insecure, + Fields: opts.Fields, + Conditions: opts.Conditions, + shouldHashSignBytes: opts.shouldHashSignBytes, + Hostname: opts.Hostname, + } +} + +// PolicyV4Fields describes the attributes for a PostPolicyV4 request. +type PolicyV4Fields struct { + // ACL specifies the access control permissions for the object. + // Optional. + ACL string + // CacheControl specifies the caching directives for the object. + // Optional. + CacheControl string + // ContentType specifies the media type of the object. + // Optional. + ContentType string + // ContentDisposition specifies how the file will be served back to requesters. + // Optional. + ContentDisposition string + // ContentEncoding specifies the decompressive transcoding that the object. + // This field is complementary to ContentType in that the file could be + // compressed but ContentType specifies the file's original media type. + // Optional. + ContentEncoding string + // Metadata specifies custom metadata for the object. + // If any key doesn't begin with "x-goog-meta-", an error will be returned. + // Optional. + Metadata map[string]string + // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage + // will serve back on successful upload of the object. 
+ // Optional. + StatusCodeOnSuccess int + // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + RedirectToURLOnSuccess string +} + +// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. +type PostPolicyV4 struct { + // URL is the generated URL that the file upload will be made to. + URL string + // Fields specifies the generated key-values that the file uploader + // must include in their multipart upload form. + Fields map[string]string +} + +// PostPolicyV4Condition describes the constraints that the subsequent +// object upload's multipart form fields will be expected to conform to. +type PostPolicyV4Condition interface { + isEmpty() bool + json.Marshaler +} + +type startsWith struct { + key, value string +} + +func (sw *startsWith) MarshalJSON() ([]byte, error) { + return json.Marshal([]string{"starts-with", sw.key, sw.value}) +} +func (sw *startsWith) isEmpty() bool { + return sw.value == "" +} + +// ConditionStartsWith checks that an attributes starts with value. +// An empty value will cause this condition to be ignored. +func ConditionStartsWith(key, value string) PostPolicyV4Condition { + return &startsWith{key, value} +} + +type contentLengthRangeCondition struct { + start, end uint64 +} + +func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) +} +func (clr *contentLengthRangeCondition) isEmpty() bool { + return clr.start == 0 && clr.end == 0 +} + +type singleValueCondition struct { + name, value string +} + +func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{svc.name: svc.value}) +} +func (svc *singleValueCondition) isEmpty() bool { + return svc.value == "" +} + +// ConditionContentLengthRange constraints the limits that the +// multipart upload's range header will be expected to be within. +func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { + return &contentLengthRangeCondition{start, end} +} + +func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { + return &singleValueCondition{"success_action_redirect", redirectURL} +} + +func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { + svc := &singleValueCondition{name: "success_action_status"} + if statusCode > 0 { + svc.value = fmt.Sprintf("%d", statusCode) + } + return svc +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4 +// method which uses the Client's credentials to handle authentication. 
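Before the implementation that follows, a hedged usage sketch of the generator just documented. The service-account email, key file, bucket, and object names are placeholders; the metadata key illustrates the required `x-goog-meta-` prefix:

```
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// pemKey holds the PEM-encoded private key of the signing service account.
	pemKey, err := os.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}

	pp, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/report.csv", &storage.PostPolicyV4Options{
		GoogleAccessID: "uploader@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Expires:        time.Now().Add(15 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			ContentType: "text/csv",
			Metadata:    map[string]string{"x-goog-meta-owner": "reporting"},
		},
		Conditions: []storage.PostPolicyV4Condition{
			storage.ConditionContentLengthRange(0, 10<<20), // at most 10 MiB
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// pp.URL is the POST target; pp.Fields are the form fields the uploader
	// must include alongside the file part.
	fmt.Println(pp.URL)
	for k, v := range pp.Fields {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```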
+func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if bucket == "" { + return nil, errors.New("storage: bucket must be non-empty") + } + if object == "" { + return nil, errors.New("storage: object must be non-empty") + } + now := utcNow() + if err := validatePostPolicyV4Options(opts, now); err != nil { + return nil, err + } + + var signingFn func(hashedBytes []byte) ([]byte, error) + switch { + case opts.SignRawBytes != nil: + signingFn = opts.SignRawBytes + case opts.shouldHashSignBytes: + signingFn = opts.SignBytes + case len(opts.PrivateKey) != 0: + parsedRSAPrivKey, err := parseKey(opts.PrivateKey) + if err != nil { + return nil, err + } + signingFn = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:]) + } + + default: + return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") + } + + var descFields PolicyV4Fields + if opts.Fields != nil { + descFields = *opts.Fields + } + + if err := validateMetadata(descFields.Metadata); err != nil { + return nil, err + } + + // Build the policy. + conds := make([]PostPolicyV4Condition, len(opts.Conditions)) + copy(conds, opts.Conditions) + conds = append(conds, + // These are ordered lexicographically. Technically the order doesn't matter + // for creating the policy, but we use this order to match the + // cross-language conformance tests for this feature. + &singleValueCondition{"acl", descFields.ACL}, + &singleValueCondition{"cache-control", descFields.CacheControl}, + &singleValueCondition{"content-disposition", descFields.ContentDisposition}, + &singleValueCondition{"content-encoding", descFields.ContentEncoding}, + &singleValueCondition{"content-type", descFields.ContentType}, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + ) + + YYYYMMDD := now.Format(yearMonthDay) + policyFields := map[string]string{ + "key": object, + "x-goog-date": now.Format(iso8601), + "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + "x-goog-algorithm": "GOOG4-RSA-SHA256", + "acl": descFields.ACL, + "cache-control": descFields.CacheControl, + "content-disposition": descFields.ContentDisposition, + "content-encoding": descFields.ContentEncoding, + "content-type": descFields.ContentType, + "success_action_redirect": descFields.RedirectToURLOnSuccess, + } + for key, value := range descFields.Metadata { + conds = append(conds, &singleValueCondition{key, value}) + policyFields[key] = value + } + + // Following from the order expected by the conformance test cases, + // hence manually inserting these fields in a specific order. 
+ conds = append(conds, + &singleValueCondition{"bucket", bucket}, + &singleValueCondition{"key", object}, + &singleValueCondition{"x-goog-date", now.Format(iso8601)}, + &singleValueCondition{ + name: "x-goog-credential", + value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + }, + &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, + ) + + nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) + for _, cond := range conds { + if cond == nil || !cond.isEmpty() { + nonEmptyConds = append(nonEmptyConds, cond) + } + } + condsAsJSON, err := json.Marshal(map[string]interface{}{ + "conditions": nonEmptyConds, + "expiration": opts.Expires.Format(time.RFC3339), + }) + if err != nil { + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) + } + + b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) + var signature []byte + var signErr error + + if opts.shouldHashSignBytes { + // SignBytes expects hashed bytes as input instead of raw bytes, so we hash them + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, signErr = signingFn(shaSum[:]) + } else { + signature, signErr = signingFn([]byte(b64Policy)) + } + if signErr != nil { + return nil, signErr + } + + policyFields["policy"] = b64Policy + policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) + + // Construct the URL. + scheme := "https" + if opts.Insecure { + scheme = "http" + } + path := opts.Style.path(bucket, "") + "/" + u := &url.URL{ + Path: path, + RawPath: pathEncodeV4(path), + Host: opts.Style.host(opts.Hostname, bucket), + Scheme: scheme, + } + + if descFields.StatusCodeOnSuccess > 0 { + policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) + } + + // Clear out fields with blanks values. + for key, value := range policyFields { + if value == "" { + delete(policyFields, key) + } + } + pp4 := &PostPolicyV4{ + Fields: policyFields, + URL: u.String(), + } + return pp4, nil +} + +// validatePostPolicyV4Options checks that: +// * GoogleAccessID is set +// * either PrivateKey or SignRawBytes/SignBytes is set, but not both +// * the deadline set in Expires is not in the past +// * if Style is not set, it'll use PathStyle +// * sets shouldHashSignBytes to true if opts.SignBytes should be used +func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { + if opts == nil || opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") + } + if opts.Expires.Before(now) { + return errors.New("storage: expecting Expires to be in the future") + } + if opts.Style == nil { + opts.Style = PathStyle() + } + if opts.SignRawBytes == nil && opts.SignBytes != nil { + opts.shouldHashSignBytes = true + } + return nil +} + +// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", +// otherwise it will return an error. 
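Beyond validation, the doc for GenerateSignedPostPolicyV4 above notes that the returned URL and fields let an unauthenticated client perform the multipart upload itself. A hedged, standard-library-only sketch of that client side; `policyURL`, `fields`, and the file contents are assumed to come from a policy generated as shown earlier:

```
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

// postWithPolicy builds the multipart/form-data request an uploader would
// send to Cloud Storage: the signed policy fields first, then the file part,
// which must be the last field in the form.
func postWithPolicy(policyURL string, fields map[string]string, contents []byte) error {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	for k, v := range fields {
		if err := w.WriteField(k, v); err != nil {
			return err
		}
	}

	fw, err := w.CreateFormFile("file", "upload")
	if err != nil {
		return err
	}
	if _, err := fw.Write(contents); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}

	resp, err := http.Post(policyURL, w.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("post-policy upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// Placeholders; in practice these come from a *storage.PostPolicyV4.
	if err := postWithPolicy("https://storage.googleapis.com/my-bucket/", nil, []byte("hello")); err != nil {
		fmt.Println(err)
	}
}
```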
+func validateMetadata(hdrs map[string]string) (err error) { + if len(hdrs) == 0 { + return nil + } + + badKeys := make([]string, 0, len(hdrs)) + for key := range hdrs { + if !strings.HasPrefix(key, "x-goog-meta-") { + badKeys = append(badKeys, key) + } + } + if len(badKeys) != 0 { + err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) + } + return +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 000000000..6da2432f0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,286 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "cloud.google.com/go/internal/trace" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// ReaderObjectAttrs are attributes about the object being read. These are populated +// during the New call. This struct only holds a subset of object attributes: to +// get the full set of attributes, use ObjectHandle.Attrs. +// +// Each field is read-only. +type ReaderObjectAttrs struct { + // Size is the length of the object's content. + Size int64 + + // StartOffset is the byte offset within the object + // from which reading begins. + // This value is only non-zero for range requests. + StartOffset int64 + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // CacheControl specifies whether and for how long browser and Internet + // caches are allowed to cache your objects. + CacheControl string + + // LastModified is the time that the object was last modified. + LastModified time.Time + + // Generation is the generation number of the object's content. + Generation int64 + + // Metageneration is the version of the metadata for this object at + // this generation. This field is used for preconditions and for + // detecting changes in metadata. A metageneration number is only + // meaningful in the context of a particular generation of a + // particular object. + Metageneration int64 +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +// +// By default, reads are made using the Cloud Storage XML API. We recommend +// using the JSON API instead, which can be done by setting [WithJSONReads] +// when calling [NewClient]. This ensures consistency with other client +// operations, which all use JSON. JSON will become the default in a future +// release. 
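A minimal download sketch using NewReader as vendored here; the bucket and object names are placeholders, and WithJSONReads opts into the JSON API as the comment above recommends:

```
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	r, err := client.Bucket("my-bucket").Object("notes.txt").NewReader(ctx)
	if err != nil {
		log.Fatal(err) // storage.ErrObjectNotExist if the object is missing
	}
	defer r.Close()

	data, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes (size per attrs: %d)\n", len(data), r.Attrs.Size)
}
```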
+func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. If offset is negative, the object is read abs(offset) bytes +// from the end, and length must also be negative to indicate all remaining +// bytes will be read. +// +// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies +// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding +// that file will be served back whole, regardless of the requested range as +// Google Cloud Storage dictates. +// +// By default, reads are made using the Cloud Storage XML API. We recommend +// using the JSON API instead, which can be done by setting [WithJSONReads] +// when calling [NewClient]. This ensures consistency with other client +// operations, which all use JSON. JSON will become the default in a future +// release. +func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { + // This span covers the life of the reader. It is closed via the context + // in Reader.Close. + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Reader") + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 && length >= 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + + opts := makeStorageOpts(true, o.retry, o.userProject) + + params := &newRangeReaderParams{ + bucket: o.bucket, + object: o.object, + gen: o.gen, + offset: offset, + length: length, + encryptionKey: o.encryptionKey, + conds: o.conds, + readCompressed: o.readCompressed, + } + + r, err = o.c.tc.NewRangeReader(ctx, params, opts...) + + // Pass the context so that the span can be closed in Reader.Close, or close the + // span now if there is an error. + if err == nil { + r.ctx = ctx + } else { + trace.EndSpan(ctx, err) + } + + return r, err +} + +// decompressiveTranscoding returns true if the request was served decompressed +// and different than its original storage form. This happens when the "Content-Encoding" +// header is "gzip". +// See: +// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// - https://github.com/googleapis/google-cloud-go/issues/1800 +func decompressiveTranscoding(res *http.Response) bool { + // Decompressive Transcoding. + return res.Header.Get("Content-Encoding") == "gzip" || + res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" +} + +func uncompressedByServer(res *http.Response) bool { + // If the data is stored as gzip but is not encoded as gzip, then it + // was uncompressed by the server. + return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && + res.Header.Get("Content-Encoding") != "gzip" +} + +// parseCRC32c parses the crc32c hash from the X-Goog-Hash header. +// It can parse headers in the form [crc32c=xxx md5=xxx] (XML responses) or the +// form [crc32c=xxx,md5=xxx] (JSON responses). The md5 hash is ignored. 
+func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + values := strings.Split(spec, ",") + + for _, v := range values { + if strings.HasPrefix(v, prefix) { + c, err := decodeUint32(v[len(prefix):]) + if err == nil { + return c, true + } + } + } + + } + return 0, false +} + +// setConditionsHeaders sets precondition request headers for downloads +// using the XML API. It assumes that the conditions have been validated. +func setConditionsHeaders(headers http.Header, conds *Conditions) error { + if conds == nil { + return nil + } + if conds.MetagenerationMatch != 0 { + headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) + } + switch { + case conds.GenerationMatch != 0: + headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) + case conds.DoesNotExist: + headers.Set("x-goog-if-generation-match", "0") + } + return nil +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +// +// Typically, a Reader computes the CRC of the downloaded content and compares it to +// the stored CRC, returning an error from Read if there is a mismatch. This integrity check +// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. +type Reader struct { + Attrs ReaderObjectAttrs + seen, remain, size int64 + checkCRC bool // Did we check the CRC? This is now only used by tests. + + reader io.ReadCloser + ctx context.Context +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + err := r.reader.Close() + trace.EndSpan(r.ctx, err) + return err +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + return n, err +} + +// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface. +// This is called implicitly when calling io.Copy on a Reader. +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + // This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an + // implementation of WriteTo. + n, err := io.Copy(w, r.reader) + if r.remain != -1 { + r.remain -= int64(n) + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +// +// Deprecated: use Reader.Attrs.Size. +func (r *Reader) Size() int64 { + return r.Attrs.Size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +// +// Deprecated: use Reader.Attrs.ContentType. +func (r *Reader) ContentType() string { + return r.Attrs.ContentType +} + +// ContentEncoding returns the content encoding of the object. +// +// Deprecated: use Reader.Attrs.ContentEncoding. +func (r *Reader) ContentEncoding() string { + return r.Attrs.ContentEncoding +} + +// CacheControl returns the cache control of the object. +// +// Deprecated: use Reader.Attrs.CacheControl. +func (r *Reader) CacheControl() string { + return r.Attrs.CacheControl +} + +// LastModified returns the value of the Last-Modified header. +// +// Deprecated: use Reader.Attrs.LastModified. 
+func (r *Reader) LastModified() (time.Time, error) { + return r.Attrs.LastModified, nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 000000000..b6316fa66 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,2450 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/storage/internal" + "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + raw "google.golang.org/api/storage/v1" + "google.golang.org/api/transport" + htransport "google.golang.org/api/transport/http" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Methods which can be used in signed URLs. +var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true} + +var ( + // ErrBucketNotExist indicates that the bucket does not exist. + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + // ErrObjectNotExist indicates that the object does not exist. + ErrObjectNotExist = errors.New("storage: object doesn't exist") + // errMethodNotSupported indicates that the method called is not currently supported by the client. + // TODO: Export this error when launching the transport-agnostic client. + errMethodNotSupported = errors.New("storage: method is not currently supported") + // errMethodNotValid indicates that given HTTP method is not valid. + errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) +) + +var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope + + // aes256Algorithm is the AES256 encryption algorithm used with the + // Customer-Supplied Encryption Keys feature. 
+ aes256Algorithm = "AES256" + + // defaultGen indicates the latest object generation by default, + // using a negative value. + defaultGen = int64(-1) +) + +// TODO: remove this once header with invocation ID is applied to all methods. +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogDefaultHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service + // Scheme describes the scheme under the current host. + scheme string + // xmlHost is the default host used for XML requests. + xmlHost string + // May be nil. + creds *google.Credentials + retry *retryConfig + + // tc is the transport-agnostic client implemented with either gRPC or HTTP. + tc storageClient +} + +// NewClient creates a new Google Cloud Storage client using the HTTP transport. +// The default scope is ScopeFullControl. To use a different scope, like +// ScopeReadOnly, use option.WithScopes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. You may also use options defined in this package, such as [WithJSONReads]. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + var creds *google.Credentials + + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + // Prepend default options to avoid overriding options passed by the user. + opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), + internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + ) + + // Don't error out here. The user may have passed in their own HTTP + // client which does not auth with ADC or other common conventions. + c, err := transport.Creds(ctx, opts...) 
+ if err == nil { + creds = c + opts = append(opts, internaloption.WithCredentials(creds)) + } + } else { + var hostURL *url.URL + + if strings.Contains(host, "://") { + h, err := url.Parse(host) + if err != nil { + return nil, err + } + hostURL = h + } else { + // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST + // URL is only parsed correctly if it has a scheme, so we build it ourselves + hostURL = &url.URL{Scheme: "http", Host: host} + } + + hostURL.Path = "storage/v1/" + endpoint := hostURL.String() + + // Append the emulator host as default endpoint for the user + opts = append([]option.ClientOption{ + option.WithoutAuthentication(), + internaloption.SkipDialSettingsValidation(), + internaloption.WithDefaultEndpointTemplate(endpoint), + internaloption.WithDefaultMTLSEndpoint(endpoint), + }, opts...) + } + + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint. + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %w", err) + } + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) + if err != nil { + return nil, fmt.Errorf("storage client: %w", err) + } + // Update xmlHost and scheme with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) + } + + tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, fmt.Errorf("storage: %w", err) + } + + return &Client{ + hc: hc, + raw: rawService, + scheme: u.Scheme, + xmlHost: u.Host, + creds: creds, + tc: tc, + }, nil +} + +// NewGRPCClient creates a new Storage client using the gRPC transport and API. +// Client methods which have not been implemented in gRPC will return an error. +// In particular, methods for Cloud Pub/Sub notifications are not supported. +// Using a non-default universe domain is also not supported with the Storage +// gRPC client. +// +// The storage gRPC API is still in preview and not yet publicly available. +// If you would like to use the API, please first contact your GCP account rep to +// request access. The API may be subject to breaking changes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. +func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, err + } + + return &Client{tc: tc}, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + // Set fields to nil so that subsequent uses will panic. + c.hc = nil + c.raw = nil + c.creds = nil + if c.tc != nil { + return c.tc.Close() + } + return nil +} + +// SigningScheme determines the API version to use when signing URLs. +type SigningScheme int + +const ( + // SigningSchemeDefault is presently V2 and will change to V4 in the future. + SigningSchemeDefault SigningScheme = iota + + // SigningSchemeV2 uses the V2 scheme to sign URLs. + SigningSchemeV2 + + // SigningSchemeV4 uses the V4 scheme to sign URLs. 
+ SigningSchemeV4 +) + +// URLStyle determines the style to use for the signed URL. PathStyle is the +// default. All non-default options work with V4 scheme only. See +// https://cloud.google.com/storage/docs/request-endpoints for details. +type URLStyle interface { + // host should return the host portion of the signed URL, not including + // the scheme (e.g. storage.googleapis.com). + host(hostname, bucket string) string + + // path should return the path portion of the signed URL, which may include + // both the bucket and object name or only the object name depending on the + // style. + path(bucket, object string) string +} + +type pathStyle struct{} + +type virtualHostedStyle struct{} + +type bucketBoundHostname struct { + hostname string +} + +func (s pathStyle) host(hostname, bucket string) string { + if hostname != "" { + return stripScheme(hostname) + } + + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return stripScheme(host) + } + + return "storage.googleapis.com" +} + +func (s virtualHostedStyle) host(hostname, bucket string) string { + if hostname != "" { + return bucket + "." + stripScheme(hostname) + } + + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + return bucket + "." + stripScheme(host) + } + + return bucket + ".storage.googleapis.com" +} + +func (s bucketBoundHostname) host(_, bucket string) string { + return s.hostname +} + +func (s pathStyle) path(bucket, object string) string { + p := bucket + if object != "" { + p += "/" + object + } + return p +} + +func (s virtualHostedStyle) path(bucket, object string) string { + return object +} + +func (s bucketBoundHostname) path(bucket, object string) string { + return object +} + +// PathStyle is the default style, and will generate a URL of the form +// "//". By default, is +// storage.googleapis.com, but setting an endpoint on the storage Client or +// through STORAGE_EMULATOR_HOST overrides this. Setting Hostname on +// SignedURLOptions or PostPolicyV4Options overrides everything else. +func PathStyle() URLStyle { + return pathStyle{} +} + +// VirtualHostedStyle generates a URL relative to the bucket's virtual +// hostname, e.g. ".storage.googleapis.com/". +func VirtualHostedStyle() URLStyle { + return virtualHostedStyle{} +} + +// BucketBoundHostname generates a URL with a custom hostname tied to a +// specific GCS bucket. The desired hostname should be passed in using the +// hostname argument. Generated urls will be of the form +// "/". See +// https://cloud.google.com/storage/docs/request-endpoints#cname and +// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers +// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must +// be set to true. +func BucketBoundHostname(hostname string) URLStyle { + return bucketBoundHostname{hostname: hostname} +} + +// Strips the scheme from a host if it contains it +func stripScheme(host string) string { + if strings.Contains(host, "://") { + host = strings.SplitN(host, "://", 2)[1] + } + return host +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. 
It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. For SigningSchemeV4, the expiration may be no + // more than seven days in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. Each must be a string of the + // form "key:values", with multiple values separated by a semicolon. + // Optional. + Headers []string + + // QueryParameters is a map of additional query parameters. When + // SigningScheme is V4, this is used in computing the signature, and the + // client must use the same query parameters when using the generated signed + // URL. + // Optional. + QueryParameters url.Values + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string + + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Only supported for V4 signing. + // Optional. + Style URLStyle + + // Insecure determines whether the signed URL should use HTTPS (default) or + // HTTP. + // Only supported for V4 signing. + // Optional. + Insecure bool + + // Scheme determines the version of URL signing to use. Default is + // SigningSchemeV2. + Scheme SigningScheme + + // Hostname sets the host of the signed URL. This field overrides any + // endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle and VirtualHostedStyle URLStyles. + // Optional. 
+ Hostname string +} + +func (opts *SignedURLOptions) clone() *SignedURLOptions { + return &SignedURLOptions{ + GoogleAccessID: opts.GoogleAccessID, + SignBytes: opts.SignBytes, + PrivateKey: opts.PrivateKey, + Method: opts.Method, + Expires: opts.Expires, + ContentType: opts.ContentType, + Headers: opts.Headers, + QueryParameters: opts.QueryParameters, + MD5: opts.MD5, + Style: opts.Style, + Insecure: opts.Insecure, + Scheme: opts.Scheme, + Hostname: opts.Hostname, + } +} + +var ( + tabRegex = regexp.MustCompile(`[\t]+`) + // I was tempted to call this spacex. :) + spaceRegex = regexp.MustCompile(` +`) + + canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) + excludedCanonicalHeaders = map[string]bool{ + "x-goog-encryption-key": true, + "x-goog-encryption-key-sha256": true, + } +) + +// v2SanitizeHeaders applies the specifications for canonical extension headers at +// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers +func v2SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var header, value string + // Only keep canonical headers, discard any others. + headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) + if len(headerMatches) == 0 { + continue + } + header = headerMatches[1] + value = headerMatches[2] + + header = strings.ToLower(strings.TrimSpace(header)) + value = strings.TrimSpace(value) + + if excludedCanonicalHeaders[header] { + // Do not keep any deliberately excluded canonical headers when signing. + continue + } + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[header] = append(headerMap[header], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + sort.Strings(sanitizedHeaders) + return sanitizedHeaders +} + +// v4SanitizeHeaders applies the specifications for canonical extension headers +// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. +// +// V4 does a couple things differently from V2: +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. +func v4SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. 
+ sanitizedHeader := strings.TrimSpace(hdr) + + var key, value string + headerMatches := strings.SplitN(sanitizedHeader, ":", 2) + if len(headerMatches) < 2 { + continue + } + + key = headerMatches[0] + value = headerMatches[1] + + key = strings.ToLower(strings.TrimSpace(key)) + value = strings.TrimSpace(value) + value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" "))) + value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t"))) + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[key] = append(headerMap[key], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + return sanitizedHeaders +} + +// SignedURL returns a URL for the specified object. Signed URLs allow anyone +// access to a restricted resource for a limited time without needing a +// Google account or signing in. For more information about signed URLs, see +// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// If initializing a Storage Client, instead use the Bucket.SignedURL method +// which uses the Client's credentials to handle authentication. 
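As a usage sketch of the package-level SignedURL with the V4 scheme; the bucket, object, key path, and service-account email are placeholders:

```
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	pemKey, err := os.ReadFile("service-account.pem") // placeholder key path
	if err != nil {
		log.Fatal(err)
	}

	u, err := storage.SignedURL("my-bucket", "reports/2024.csv", &storage.SignedURLOptions{
		GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         http.MethodGet,
		Expires:        time.Now().Add(1 * time.Hour), // V4 allows at most seven days
		Scheme:         storage.SigningSchemeV4,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u)
}
```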
+func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) { + now := utcNow() + if err := validateOptions(opts, now); err != nil { + return "", err + } + + switch opts.Scheme { + case SigningSchemeV2: + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, object, opts) + case SigningSchemeV4: + opts.Headers = v4SanitizeHeaders(opts.Headers) + return signedURLV4(bucket, object, opts, now) + default: // SigningSchemeDefault + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, object, opts) + } +} + +func validateOptions(opts *SignedURLOptions, now time.Time) error { + if opts == nil { + return errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + opts.Method = strings.ToUpper(opts.Method) + if _, ok := signedURLMethods[opts.Method]; !ok { + return errMethodNotValid + } + if opts.Expires.IsZero() { + return errors.New("storage: missing required expires option") + } + if opts.MD5 != "" { + md5, err := base64.StdEncoding.DecodeString(opts.MD5) + if err != nil || len(md5) != 16 { + return errors.New("storage: invalid MD5 checksum") + } + } + if opts.Style == nil { + opts.Style = PathStyle() + } + if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 { + return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2") + } + if opts.Scheme == SigningSchemeV4 { + cutoff := now.Add(604801 * time.Second) // 7 days + 1 second + if !opts.Expires.Before(cutoff) { + return errors.New("storage: expires must be within seven days from now") + } + } + return nil +} + +const ( + iso8601 = "20060102T150405Z" + yearMonthDay = "20060102" +) + +// utcNow returns the current time in UTC and is a variable to allow for +// reassignment in tests to provide deterministic signed URL values. +var utcNow = func() time.Time { + return time.Now().UTC() +} + +// extractHeaderNames takes in a series of key:value headers and returns the +// header names only. +func extractHeaderNames(kvs []string) []string { + var res []string + for _, header := range kvs { + nameValue := strings.SplitN(header, ":", 2) + res = append(res, nameValue[0]) + } + return res +} + +// pathEncodeV4 creates an encoded string that matches the v4 signature spec. +// Following the spec precisely is necessary in order to ensure that the URL +// and signing string are correctly formed, and Go's url.PathEncode and +// url.QueryEncode don't generate an exact match without some additional logic. +func pathEncodeV4(path string) string { + segments := strings.Split(path, "/") + var encodedSegments []string + for _, s := range segments { + encodedSegments = append(encodedSegments, url.QueryEscape(s)) + } + encodedStr := strings.Join(encodedSegments, "/") + encodedStr = strings.Replace(encodedStr, "+", "%20", -1) + return encodedStr +} + +// signedURLV4 creates a signed URL using the sigV4 algorithm. 
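To illustrate the constraints checked by validateOptions and carried into the V4 canonical request, a hedged sketch of a signed PUT URL whose Content-Type is part of the signature (placeholder names; the uploader must send the same header or GCS rejects the request):

```
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	pemKey, err := os.ReadFile("service-account.pem") // placeholder key path
	if err != nil {
		log.Fatal(err)
	}

	u, err := storage.SignedURL("my-bucket", "uploads/report.txt", &storage.SignedURLOptions{
		GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         http.MethodPut,
		Expires:        time.Now().Add(30 * time.Minute),
		ContentType:    "text/plain",
		Scheme:         storage.SigningSchemeV4,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The Content-Type header must match the signed value.
	req, err := http.NewRequest(http.MethodPut, u, bytes.NewReader([]byte("hello")))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "text/plain")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```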
+func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + + u := &url.URL{Path: opts.Style.path(bucket, name)} + u.RawPath = pathEncodeV4(u.Path) + + // Note: we have to add a / here because GCS does so auto-magically, despite + // our encoding not doing so (and we have to exactly match their + // canonical query). + fmt.Fprintf(buf, "/%s\n", u.RawPath) + + headerNames := append(extractHeaderNames(opts.Headers), "host") + if opts.ContentType != "" { + headerNames = append(headerNames, "content-type") + } + if opts.MD5 != "" { + headerNames = append(headerNames, "content-md5") + } + sort.Strings(headerNames) + signedHeaders := strings.Join(headerNames, ";") + timestamp := now.Format(iso8601) + credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) + canonicalQueryString := url.Values{ + "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, + "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, + "X-Goog-Date": {timestamp}, + "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, + "X-Goog-SignedHeaders": {signedHeaders}, + } + // Add user-supplied query parameters to the canonical query string. For V4, + // it's necessary to include these. + for k, v := range opts.QueryParameters { + canonicalQueryString[k] = append(canonicalQueryString[k], v...) + } + // url.Values.Encode escaping is correct, except that a space must be replaced + // by `%20` rather than `+`. + escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) + fmt.Fprintf(buf, "%s\n", escapedQuery) + + // Fill in the hostname based on the desired URL style. + u.Host = opts.Style.host(opts.Hostname, bucket) + + // Fill in the URL scheme. + if opts.Insecure { + u.Scheme = "http" + } else { + u.Scheme = "https" + } + + var headersWithValue []string + headersWithValue = append(headersWithValue, "host:"+u.Hostname()) + headersWithValue = append(headersWithValue, opts.Headers...) + if opts.ContentType != "" { + headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) + } + if opts.MD5 != "" { + headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5) + } + // Trim extra whitespace from headers and replace with a single space. + var trimmedHeaders []string + for _, h := range headersWithValue { + trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " ")) + } + canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n") + fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) + fmt.Fprintf(buf, "%s\n", signedHeaders) + + // If the user provides a value for X-Goog-Content-SHA256, we must use + // that value in the request string. If not, we use UNSIGNED-PAYLOAD. 
+ sha256Header := false + for _, h := range trimmedHeaders { + if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") { + sha256Header = true + fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1]) + break + } + } + if !sha256Header { + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + } + + sum := sha256.Sum256(buf.Bytes()) + hexDigest := hex.EncodeToString(sum[:]) + signBuf := &bytes.Buffer{} + fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") + fmt.Fprintf(signBuf, "%s\n", timestamp) + fmt.Fprintf(signBuf, "%s\n", credentialScope) + fmt.Fprintf(signBuf, "%s", hexDigest) + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + b, err := signBytes(signBuf.Bytes()) + if err != nil { + return "", err + } + signature := hex.EncodeToString(b) + canonicalQueryString.Set("X-Goog-Signature", string(signature)) + u.RawQuery = canonicalQueryString.Encode() + return u.String(), nil +} + +// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header +// key. +func sortHeadersByKey(hdrs []string) []string { + headersMap := map[string]string{} + var headersKeys []string + for _, h := range hdrs { + parts := strings.SplitN(h, ":", 2) + k := parts[0] + v := parts[1] + headersMap[k] = v + headersKeys = append(headersKeys, k) + } + sort.Strings(headersKeys) + var sorted []string + for _, k := range headersKeys { + v := headersMap[k] + sorted = append(sorted, fmt.Sprintf("%s:%s", k, v)) + } + return sorted +} + +func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + if len(opts.Headers) > 0 { + fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) + } + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = PathStyle().host(opts.Hostname, bucket) + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key + userProject string // for requester-pays buckets + readCompressed bool // Accept-Encoding: gzip + retry *retryConfig + overrideRetention *bool + softDeleted bool +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. 
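A small sketch of obtaining an ObjectHandle and listing its ACL (placeholder bucket and object; credentials come from the environment):

```
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// An ObjectHandle is a lightweight reference; building it performs no RPCs.
	obj := client.Bucket("my-bucket").Object("notes.txt")

	// Reading the ACL does make a network call.
	rules, err := obj.ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Printf("%s has role %s\n", r.Entity, r.Role)
	}
}
```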
+func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. The supplied +// Conditions must have at least one field set to a non-default value; +// otherwise an error will be returned from any operation on the ObjectHandle. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. +func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + opts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...) +} + +// Update updates an object with the provided attributes. See +// ObjectAttrsToUpdate docs for details on treatment of zero values. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.UpdateObject(ctx, + &updateObjectParams{ + bucket: o.bucket, + object: o.object, + uattrs: &uattrs, + gen: o.gen, + encryptionKey: o.encryptionKey, + conds: o.conds, + overrideRetention: o.overrideRetention, + }, opts...) +} + +// BucketName returns the name of the bucket. +func (o *ObjectHandle) BucketName() string { + return o.bucket +} + +// ObjectName returns the name of the object. +func (o *ObjectHandle) ObjectName() string { + return o.object +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// For all fields except CustomTime and Retention, set the field to its zero +// value to delete it. CustomTime cannot be deleted or changed to an earlier +// time once set. Retention can be deleted (only if the Mode is Unlocked) by +// setting it to an empty value (not nil). 
+// +// For example, to change ContentType and delete ContentEncoding, Metadata and +// Retention, use: +// +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// Retention: &ObjectRetention{}, +// } +type ObjectAttrsToUpdate struct { + EventBasedHold optional.Bool + TemporaryHold optional.Bool + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + CustomTime time.Time // Cannot be deleted or backdated from its current value. + Metadata map[string]string // Set to map[string]string{} to delete. + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. ACL must be nil. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. + PredefinedACL string + + // Retention contains the retention configuration for this object. + // Operations other than setting the retention for the first time or + // extending the RetainUntil time on the object retention must be done + // on an ObjectHandle with OverrideUnlockedRetention set to true. + Retention *ObjectRetention +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + // Delete is idempotent if GenerationMatch or Generation have been passed in. + // The default generation is negative to get the latest version of the object. + isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) +} + +// ReadCompressed when true causes the read to happen without decompressing. +func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { + o2 := *o + o2.readCompressed = compressed + return &o2 +} + +// OverrideUnlockedRetention provides an option for overriding an Unlocked +// Retention policy. This must be set to true in order to change a policy +// from Unlocked to Locked, to set it to null, or to reduce its +// RetainUntil attribute. It is not required for setting the ObjectRetention for +// the first time nor for extending the RetainUntil time. +func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle { + o2 := *o + o2.overrideRetention = &override + return &o2 +} + +// SoftDeleted returns an object handle that can be used to get an object that +// has been soft deleted. To get a soft deleted object, the generation must be +// set on the object using ObjectHandle.Generation. +// Note that an error will be returned if a live object is queried using this. +func (o *ObjectHandle) SoftDeleted() *ObjectHandle { + o2 := *o + o2.softDeleted = true + return &o2 +} + +// RestoreOptions allows you to set options when restoring an object. +type RestoreOptions struct { + /// CopySourceACL indicates whether the restored object should copy the + // access controls of the source object. Only valid for buckets with + // fine-grained access. If uniform bucket-level access is enabled, setting + // CopySourceACL will cause an error. + CopySourceACL bool +} + +// Restore will restore a soft-deleted object to a live object. +// Note that you must specify a generation to use this method. 
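A sketch of restoring a soft-deleted object; the bucket, object, and generation are placeholders, and the generation must identify the soft-deleted generation, per the comment above:

```
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

// restoreObject brings a soft-deleted generation of an object back to life.
func restoreObject(ctx context.Context, client *storage.Client, bucket, object string, gen int64) (*storage.ObjectAttrs, error) {
	obj := client.Bucket(bucket).Object(object).Generation(gen)
	return obj.Restore(ctx, &storage.RestoreOptions{CopySourceACL: false})
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	attrs, err := restoreObject(ctx, client, "my-bucket", "notes.txt", 1234567890 /* placeholder generation */)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored, new generation:", attrs.Generation)
}
```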
+func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + + // Since the generation is required by restore calls, we set the default to + // 0 instead of a negative value, which returns a more descriptive error. + gen := o.gen + if o.gen == defaultGen { + gen = 0 + } + + // Restore is always idempotent because Generation is a required param. + sOpts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.RestoreObject(ctx, &restoreObjectParams{ + bucket: o.bucket, + object: o.object, + gen: gen, + conds: o.conds, + copySourceACL: opts.CopySourceACL, + }, sOpts...) +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. +// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// Note that each Writer allocates an internal buffer of size Writer.ChunkSize. +// See the ChunkSize docs for more information. +// +// It is the caller's responsibility to call Close when writing is done. To +// stop writing without saving the data, cancel the context. +func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer") + return &Writer{ + ctx: ctx, + o: o, + donec: make(chan struct{}), + ObjectAttrs: ObjectAttrs{Name: o.object}, + ChunkSize: googleapi.DefaultUploadChunkSize, + } +} + +func (o *ObjectHandle) validate() error { + if o.bucket == "" { + return errors.New("storage: bucket name is empty") + } + if o.object == "" { + return errors.New("storage: object name is empty") + } + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + // Names . and .. are not valid; see https://cloud.google.com/storage/docs/objects#naming + if o.object == "." || o.object == ".." { + return fmt.Errorf("storage: object name %q is not valid", o.object) + } + return nil +} + +// parseKey converts the binary contents of a private key file to an +// *rsa.PrivateKey. It detects whether the private key is in a PEM container or +// not. If so, it extracts the private key from PEM container before +// conversion. It only supports PEM containers with no passphrase. +func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} + +// toRawObject copies the editable attributes from o to the raw library's Object type. 
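A minimal upload sketch with NewWriter as vendored above (placeholder bucket and object); attributes are set before the first Write, and the object only becomes visible once Close returns successfully:

```
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("my-bucket").Object("greeting.txt").NewWriter(ctx)
	w.ContentType = "text/plain" // set attributes before the first Write
	w.Metadata = map[string]string{"source": "example"}

	if _, err := fmt.Fprint(w, "hello, world"); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote generation", w.Attrs().Generation)
}
```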
+func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { + var ret string + if !o.RetentionExpirationTime.IsZero() { + ret = o.RetentionExpirationTime.Format(time.RFC3339) + } + var ct string + if !o.CustomTime.IsZero() { + ct = o.CustomTime.Format(time.RFC3339) + } + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + EventBasedHold: o.EventBasedHold, + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: ret, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toRawObjectACL(o.ACL), + Metadata: o.Metadata, + CustomTime: ct, + Retention: o.Retention.toRawObjectRetention(), + } +} + +// toProtoObject copies the editable attributes from o to the proto library's Object type. +func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { + // For now, there are only globally unique buckets, and "_" is the alias + // project ID for such buckets. If the bucket is not provided, like in the + // destination ObjectAttrs of a Copy, do not attempt to format it. + if b != "" { + b = bucketResourceName(globalProjectAlias, b) + } + + return &storagepb.Object{ + Bucket: b, + Name: o.Name, + EventBasedHold: proto.Bool(o.EventBasedHold), + TemporaryHold: o.TemporaryHold, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toProtoObjectACL(o.ACL), + Metadata: o.Metadata, + CreateTime: toProtoTimestamp(o.Created), + CustomTime: toProtoTimestamp(o.CustomTime), + DeleteTime: toProtoTimestamp(o.Deleted), + RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime), + UpdateTime: toProtoTimestamp(o.Updated), + KmsKey: o.KMSKeyName, + Generation: o.Generation, + Size: o.Size, + } +} + +// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. +func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { + o := &storagepb.Object{ + Name: object, + Bucket: bucket, + } + if uattrs == nil { + return o + } + + if uattrs.EventBasedHold != nil { + o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) + } + if uattrs.TemporaryHold != nil { + o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + } + if uattrs.ContentType != nil { + o.ContentType = optional.ToString(uattrs.ContentType) + } + if uattrs.ContentLanguage != nil { + o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + } + if uattrs.ContentEncoding != nil { + o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + } + if uattrs.ContentDisposition != nil { + o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + } + if uattrs.CacheControl != nil { + o.CacheControl = optional.ToString(uattrs.CacheControl) + } + if !uattrs.CustomTime.IsZero() { + o.CustomTime = toProtoTimestamp(uattrs.CustomTime) + } + if uattrs.ACL != nil { + o.Acl = toProtoObjectACL(uattrs.ACL) + } + + o.Metadata = uattrs.Metadata + + return o +} + +// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. +type ObjectAttrs struct { + // Bucket is the name of the bucket containing this GCS object. + // This field is read-only. + Bucket string + + // Name is the name of the object within the bucket. + // This field is read-only. 
+ Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // EventBasedHold specifies whether an object is under event-based hold. New + // objects created in a bucket whose DefaultEventBasedHold is set will + // default to that value. + EventBasedHold bool + + // TemporaryHold specifies whether an object is under temporary hold. While + // this flag is set to true, the object is protected against deletion and + // overwrites. + TemporaryHold bool + + // RetentionExpirationTime is a server-determined value that specifies the + // earliest time that the object's retention period expires. + // This is a read-only field. + RetentionExpirationTime time.Time + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. It should be set + // only when writing, copying or composing an object. When copying or composing, + // it acts as the destinationPredefinedAcl parameter. + // PredefinedACL is always empty for ObjectAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/insert + // for valid values. + PredefinedACL string + + // Owner is the owner of the object. This field is read-only. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. This field is read-only. + Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only, + // except when used from a Writer. If set on a Writer, the uploaded + // data is rejected if its MD5 hash does not match this field. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 + // polynomial. This field is read-only, except when used from a Writer or + // Composer. In those cases, if the SendCRC32C field in the Writer or Composer + // is set to is true, the uploaded data is rejected if its CRC32C hash does + // not match this field. + // + // Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. + CRC32C uint32 + + // MediaLink is an URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + // + // For object downloads using Reader, metadata keys are sent as headers. + // Therefore, avoid setting metadata keys using characters that are not valid + // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // Metageneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. 
+	Metageneration int64
+
+	// StorageClass is the storage class of the object. This defines
+	// how objects are stored and determines the SLA and the cost of storage.
+	// Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
+	// Defaults to "STANDARD".
+	// See https://cloud.google.com/storage/docs/storage-classes for all
+	// valid values.
+	StorageClass string
+
+	// Created is the time the object was created. This field is read-only.
+	Created time.Time
+
+	// Deleted is the time the object was deleted.
+	// If not deleted, it is the zero value. This field is read-only.
+	Deleted time.Time
+
+	// Updated is the creation or modification time of the object.
+	// For buckets with versioning enabled, changing an object's
+	// metadata does not change this property. This field is read-only.
+	Updated time.Time
+
+	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
+	// customer-supplied encryption key for the object. It is empty if there is
+	// no customer-supplied encryption key.
+	// See https://cloud.google.com/storage/docs/encryption for more about
+	// encryption in Google Cloud Storage.
+	CustomerKeySHA256 string
+
+	// Cloud KMS key name, in the form
+	// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
+	// if the object is encrypted by such a key.
+	//
+	// Providing both a KMSKeyName and a customer-supplied encryption key (via
+	// ObjectHandle.Key) will result in an error when writing an object.
+	KMSKeyName string
+
+	// Prefix is set only for ObjectAttrs which represent synthetic "directory
+	// entries" when iterating over buckets using Query.Delimiter. See
+	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
+	// populated.
+	Prefix string
+
+	// Etag is the HTTP/1.1 Entity tag for the object.
+	// This field is read-only.
+	Etag string
+
+	// A user-specified timestamp which can be applied to an object. This is
+	// typically set in order to use the CustomTimeBefore and DaysSinceCustomTime
+	// LifecycleConditions to manage object lifecycles.
+	//
+	// CustomTime cannot be removed once set on an object. It can be updated to a
+	// later value but not to an earlier one. For more information see
+	// https://cloud.google.com/storage/docs/metadata#custom-time .
+	CustomTime time.Time
+
+	// ComponentCount is the number of objects contained within a composite object.
+	// For non-composite objects, the value will be zero.
+	// This field is read-only.
+	ComponentCount int64
+
+	// Retention contains the retention configuration for this object.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	Retention *ObjectRetention
+
+	// SoftDeleteTime is the time when the object became soft-deleted.
+	// Soft-deleted objects are only accessible on an object handle returned by
+	// ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
+	// ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
+	// This field is read-only.
+	SoftDeleteTime time.Time
+
+	// HardDeleteTime is the time when the object will be permanently deleted.
+	// Only set when an object becomes soft-deleted with a soft delete policy.
+	// Soft-deleted objects are only accessible on an object handle returned by
+	// ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
+	// ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
+	// This field is read-only.
+ HardDeleteTime time.Time +} + +// ObjectRetention contains the retention configuration for this object. +type ObjectRetention struct { + // Mode is the retention policy's mode on this object. Valid values are + // "Locked" and "Unlocked". + // Locked retention policies cannot be changed. Unlocked policies require an + // override to change. + Mode string + + // RetainUntil is the time this object will be retained until. + RetainUntil time.Time +} + +func (r *ObjectRetention) toRawObjectRetention() *raw.ObjectRetention { + if r == nil { + return nil + } + return &raw.ObjectRetention{ + Mode: r.Mode, + RetainUntilTime: r.RetainUntil.Format(time.RFC3339), + } +} + +func toObjectRetention(r *raw.ObjectRetention) *ObjectRetention { + if r == nil { + return nil + } + return &ObjectRetention{ + Mode: r.Mode, + RetainUntil: convertTime(r.RetainUntilTime), + } +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned. +func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func convertProtoTime(t *timestamppb.Timestamp) time.Time { + var r time.Time + if t != nil { + r = t.AsTime() + } + return r +} + +func toProtoTimestamp(t time.Time) *timestamppb.Timestamp { + if t.IsZero() { + return nil + } + + return timestamppb.New(t) +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + crc32c, _ := decodeUint32(o.Crc32c) + var sha256 string + if o.CustomerEncryption != nil { + sha256 = o.CustomerEncryption.KeySha256 + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + EventBasedHold: o.EventBasedHold, + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: convertTime(o.RetentionExpirationTime), + ACL: toObjectACLRules(o.Acl), + Owner: owner, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + CustomerKeySHA256: sha256, + KMSKeyName: o.KmsKeyName, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + Etag: o.Etag, + CustomTime: convertTime(o.CustomTime), + ComponentCount: o.ComponentCount, + Retention: toObjectRetention(o.Retention), + SoftDeleteTime: convertTime(o.SoftDeleteTime), + HardDeleteTime: convertTime(o.HardDeleteTime), + } +} + +func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { + if o == nil { + return nil + } + return &ObjectAttrs{ + Bucket: parseBucketName(o.Bucket), + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + EventBasedHold: o.GetEventBasedHold(), + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()), + ACL: toObjectACLRulesFromProto(o.GetAcl()), + Owner: o.GetOwner().GetEntity(), + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: o.GetChecksums().GetMd5Hash(), + CRC32C: o.GetChecksums().GetCrc32C(), + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: 
o.Metageneration, + StorageClass: o.StorageClass, + // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. + CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), + KMSKeyName: o.GetKmsKey(), + Created: convertProtoTime(o.GetCreateTime()), + Deleted: convertProtoTime(o.GetDeleteTime()), + Updated: convertProtoTime(o.GetUpdateTime()), + CustomTime: convertProtoTime(o.GetCustomTime()), + ComponentCount: int64(o.ComponentCount), + SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()), + HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()), + } +} + +// Decode a uint32 encoded in Base64 in big-endian byte order. +func decodeUint32(b64 string) (uint32, error) { + d, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return 0, err + } + if len(d) != 4 { + return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) + } + return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil +} + +// Encode a uint32 as Base64 in big-endian byte order. +func encodeUint32(u uint32) string { + b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} + return base64.StdEncoding.EncodeToString(b) +} + +// Projection is enumerated type for Query.Projection. +type Projection int + +const ( + // ProjectionDefault returns all fields of objects. + ProjectionDefault Projection = iota + + // ProjectionFull returns all fields of objects. + ProjectionFull + + // ProjectionNoACL returns all fields of objects except for Owner and ACL. + ProjectionNoACL +) + +func (p Projection) String() string { + switch p { + case ProjectionFull: + return "full" + case ProjectionNoACL: + return "noAcl" + default: + return "" + } +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Must be set to / when used with the MatchGlob parameter to filter results + // in a directory-like mode. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool + + // attrSelection is used to select only specific fields to be returned by + // the query. It is set by the user calling SetAttrSelection. These + // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON + // clients respectively. + attrSelection []string + + // StartOffset is used to filter results to objects whose names are + // lexicographically equal to or after startOffset. If endOffset is also set, + // the objects listed will have names between startOffset (inclusive) and + // endOffset (exclusive). + StartOffset string + + // EndOffset is used to filter results to objects whose names are + // lexicographically before endOffset. If startOffset is also set, the objects + // listed will have names between startOffset (inclusive) and endOffset (exclusive). + EndOffset string + + // Projection defines the set of properties to return. It will default to ProjectionFull, + // which returns all properties. 
Passing ProjectionNoACL will omit Owner and ACL, + // which may improve performance when listing many objects. + Projection Projection + + // IncludeTrailingDelimiter controls how objects which end in a single + // instance of Delimiter (for example, if Query.Delimiter = "/" and the + // object name is "foo/bar/") are included in the results. By default, these + // objects only show up as prefixes. If IncludeTrailingDelimiter is set to + // true, they will also be included as objects and their metadata will be + // populated in the returned ObjectAttrs. + IncludeTrailingDelimiter bool + + // MatchGlob is a glob pattern used to filter results (for example, foo*bar). See + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob + // for syntax details. When Delimiter is set in conjunction with MatchGlob, + // it must be set to /. + MatchGlob string + + // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of + // prefixes returned by the query. Only applicable if Delimiter is set to /. + // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. + IncludeFoldersAsPrefixes bool + + // SoftDeleted indicates whether to list soft-deleted objects. + // If true, only objects that have been soft-deleted will be listed. + // By default, soft-deleted objects are not listed. + SoftDeleted bool +} + +// attrToFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the API call. Only the ObjectAttrs field names are visible to users +// because they are already part of the public API of the package. +var attrToFieldMap = map[string]string{ + "Bucket": "bucket", + "Name": "name", + "ContentType": "contentType", + "ContentLanguage": "contentLanguage", + "CacheControl": "cacheControl", + "EventBasedHold": "eventBasedHold", + "TemporaryHold": "temporaryHold", + "RetentionExpirationTime": "retentionExpirationTime", + "ACL": "acl", + "Owner": "owner", + "ContentEncoding": "contentEncoding", + "ContentDisposition": "contentDisposition", + "Size": "size", + "MD5": "md5Hash", + "CRC32C": "crc32c", + "MediaLink": "mediaLink", + "Metadata": "metadata", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storageClass", + "CustomerKeySHA256": "customerEncryption", + "KMSKeyName": "kmsKeyName", + "Created": "timeCreated", + "Deleted": "timeDeleted", + "Updated": "updated", + "Etag": "etag", + "CustomTime": "customTime", + "ComponentCount": "componentCount", + "Retention": "retention", + "HardDeleteTime": "hardDeleteTime", + "SoftDeleteTime": "softDeleteTime", +} + +// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the protobuf Object message. 
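+//
+// A minimal caller-side sketch (illustrative only; client, ctx and
+// "my-bucket" are placeholders) of how a field selection flows through
+// these maps:
+//
+//	q := &storage.Query{Prefix: "logs/"}
+//	if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
+//		// handle the error
+//	}
+//	it := client.Bucket("my-bucket").Objects(ctx, q)
+//
+// Over gRPC the selection roughly becomes the field mask paths "name" and
+// "size" (see toFieldMask below); over JSON it becomes a fields string along
+// the lines of "prefixes,items(name,size)" (see toFieldSelection below),
+// with attrToFieldMap above supplying the JSON names.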
+var attrToProtoFieldMap = map[string]string{
+	"Name":                    "name",
+	"Bucket":                  "bucket",
+	"Etag":                    "etag",
+	"Generation":              "generation",
+	"Metageneration":          "metageneration",
+	"StorageClass":            "storage_class",
+	"Size":                    "size",
+	"ContentEncoding":         "content_encoding",
+	"ContentDisposition":      "content_disposition",
+	"CacheControl":            "cache_control",
+	"ACL":                     "acl",
+	"ContentLanguage":         "content_language",
+	"Deleted":                 "delete_time",
+	"ContentType":             "content_type",
+	"Created":                 "create_time",
+	"CRC32C":                  "checksums.crc32c",
+	"MD5":                     "checksums.md5_hash",
+	"Updated":                 "update_time",
+	"KMSKeyName":              "kms_key",
+	"TemporaryHold":           "temporary_hold",
+	"RetentionExpirationTime": "retention_expire_time",
+	"Metadata":                "metadata",
+	"EventBasedHold":          "event_based_hold",
+	"Owner":                   "owner",
+	"CustomerKeySHA256":       "customer_encryption",
+	"CustomTime":              "custom_time",
+	"ComponentCount":          "component_count",
+	"HardDeleteTime":          "hard_delete_time",
+	"SoftDeleteTime":          "soft_delete_time",
+	// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
+	// "MediaLink": "mediaLink",
+	// TODO: add object retention - b/308194853
+}
+
+// SetAttrSelection makes the query populate only specific attributes of
+// objects. When iterating over objects, if you only need each object's name
+// and size, pass []string{"Name", "Size"} to this method. Only these fields
+// will be fetched for each object across the network; the other fields of
+// ObjectAttrs will remain at their default values. This is a performance
+// optimization; for more information, see
+// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
+func (q *Query) SetAttrSelection(attrs []string) error {
+	// Validate selections.
+	for _, attr := range attrs {
+		// If the attr is acceptable for one of the two sets, then it is OK.
+		// If it is not acceptable for either, then return an error.
+		// The respective masking implementations ignore unknown attrs which
+		// makes switching between transports a little easier.
+		_, okJSON := attrToFieldMap[attr]
+		_, okGRPC := attrToProtoFieldMap[attr]
+
+		if !okJSON && !okGRPC {
+			return fmt.Errorf("storage: attr %v is not valid", attr)
+		}
+	}
+
+	q.attrSelection = attrs
+
+	return nil
+}
+
+func (q *Query) toFieldSelection() string {
+	if q == nil || len(q.attrSelection) == 0 {
+		return ""
+	}
+	fieldSet := make(map[string]bool)
+
+	for _, attr := range q.attrSelection {
+		field, ok := attrToFieldMap[attr]
+		if !ok {
+			// Future proofing, skip unknown fields, let SetAttrSelection handle
+			// error modes.
+			continue
+		}
+		fieldSet[field] = true
+	}
+
+	var s string
+	if len(fieldSet) > 0 {
+		var b bytes.Buffer
+		b.WriteString("prefixes,items(")
+		first := true
+		for field := range fieldSet {
+			if !first {
+				b.WriteString(",")
+			}
+			first = false
+			b.WriteString(field)
+		}
+		b.WriteString(")")
+		s = b.String()
+	}
+	return s
+}
+
+func (q *Query) toFieldMask() *fieldmaskpb.FieldMask {
+	// The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull).
+	if q == nil {
+		return &fieldmaskpb.FieldMask{Paths: []string{"*"}}
+	}
+
+	// User selected attributes via q.SetAttrSelection. This takes precedence
+	// over the Projection.
+	if numSelected := len(q.attrSelection); numSelected > 0 {
+		protoFieldPaths := make([]string, 0, numSelected)
+
+		for _, attr := range q.attrSelection {
+			pf, ok := attrToProtoFieldMap[attr]
+			if !ok {
+				// Future proofing, skip unknown fields, let SetAttrSelection
+				// handle error modes.
+				continue
+			}
+			protoFieldPaths = append(protoFieldPaths, pf)
+		}
+
+		return &fieldmaskpb.FieldMask{Paths: protoFieldPaths}
+	}
+
+	// ProjectionDefault == ProjectionFull which means all fields.
+	fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}}
+	if q.Projection == ProjectionNoACL {
+		paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields
+		for _, f := range attrToProtoFieldMap {
+			// Skip the acl and owner fields for "NoACL".
+			if f == "acl" || f == "owner" {
+				continue
+			}
+			paths = append(paths, f)
+		}
+		fm.Paths = paths
+	}
+
+	return fm
+}
+
+// Conditions constrain methods to act on specific generations of
+// objects.
+//
+// The zero value is an empty set of constraints. Not all conditions or
+// combinations of conditions are applicable to all methods.
+// See https://cloud.google.com/storage/docs/generations-preconditions
+// for details on how these operate.
+type Conditions struct {
+	// Generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// GenerationMatch specifies that the object must have the given generation
+	// for the operation to occur.
+	// If GenerationMatch is zero, it has no effect.
+	// Use DoesNotExist to specify that the object does not exist in the bucket.
+	GenerationMatch int64
+
+	// GenerationNotMatch specifies that the object must not have the given
+	// generation for the operation to occur.
+	// If GenerationNotMatch is zero, it has no effect.
+	// This condition only works for object reads if the WithJSONReads client
+	// option is set.
+	GenerationNotMatch int64
+
+	// DoesNotExist specifies that the object must not exist in the bucket for
+	// the operation to occur.
+	// If DoesNotExist is false, it has no effect.
+	DoesNotExist bool
+
+	// Metadata generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// MetagenerationMatch specifies that the object must have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationMatch is zero, it has no effect.
+	MetagenerationMatch int64
+
+	// MetagenerationNotMatch specifies that the object must not have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationNotMatch is zero, it has no effect.
+	// This condition only works for object reads if the WithJSONReads client
+	// option is set.
+	MetagenerationNotMatch int64
+}
+
+func (c *Conditions) validate(method string) error {
+	if *c == (Conditions{}) {
+		return fmt.Errorf("storage: %s: empty conditions", method)
+	}
+	if !c.isGenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
+	}
+	if !c.isMetagenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
+	}
+	return nil
+}
+
+func (c *Conditions) isGenerationValid() bool {
+	n := 0
+	if c.GenerationMatch != 0 {
+		n++
+	}
+	if c.GenerationNotMatch != 0 {
+		n++
+	}
+	if c.DoesNotExist {
+		n++
+	}
+	return n <= 1
+}
+
+func (c *Conditions) isMetagenerationValid() bool {
+	return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
+}
+
+// applyConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
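+//
+// Callers normally express these preconditions through ObjectHandle.If and
+// the transport funnels them into helpers like this one. A minimal
+// caller-side sketch (illustrative only; client, ctx and the names are
+// placeholders, and error handling is elided):
+//
+//	obj := client.Bucket("my-bucket").Object("my-object")
+//
+//	// Update only if the metageneration is unchanged, which also makes the
+//	// update conditionally idempotent for retries.
+//	_, err := obj.If(storage.Conditions{MetagenerationMatch: 3}).
+//		Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/plain"})
+//
+//	// Write only if the object does not already exist.
+//	w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)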
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setGeneration(cval, gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setIfGenerationMatch(cval, conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setIfGenerationNotMatch(cval, conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setIfGenerationMatch(cval, int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { + if gen >= 0 { + call.SourceGeneration = gen + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch = proto.Int64(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) + } + return nil +} + +// setGeneration sets Generation on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +// We also make sure to supply a compile-time constant to MethodByName; +// otherwise, the Go Linker will disable dead code elimination, leading +// to larger binaries for all packages that import storage. +func setGeneration(cval reflect.Value, value interface{}) bool { + return setCondition(cval.MethodByName("Generation"), value) +} + +// setIfGenerationMatch sets IfGenerationMatch on a *raw.WhateverCall. +// See also setGeneration. 
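+//
+// For illustration (a sketch; objCall stands in for any concrete *raw call
+// type that has this setter), a non-zero Conditions.GenerationMatch handled
+// by applyConds above amounts to the reflective equivalent of:
+//
+//	objCall.IfGenerationMatch(conds.GenerationMatch)
+//
+// performed through setCondition and reflect.Value.MethodByName below.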
+func setIfGenerationMatch(cval reflect.Value, value interface{}) bool { + return setCondition(cval.MethodByName("IfGenerationMatch"), value) +} + +// setIfGenerationNotMatch sets IfGenerationNotMatch on a *raw.WhateverCall. +// See also setGeneration. +func setIfGenerationNotMatch(cval reflect.Value, value interface{}) bool { + return setCondition(cval.MethodByName("IfGenerationNotMatch"), value) +} + +// setIfMetagenerationMatch sets IfMetagenerationMatch on a *raw.WhateverCall. +// See also setGeneration. +func setIfMetagenerationMatch(cval reflect.Value, value interface{}) bool { + return setCondition(cval.MethodByName("IfMetagenerationMatch"), value) +} + +// setIfMetagenerationNotMatch sets IfMetagenerationNotMatch on a *raw.WhateverCall. +// See also setGeneration. +func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool { + return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value) +} + +func setCondition(setter reflect.Value, value interface{}) bool { + if setter.IsValid() { + setter.Call([]reflect.Value{reflect.ValueOf(value)}) + } + return setter.IsValid() +} + +// Retryer returns an object handle that is configured with custom retry +// behavior as specified by the options that are passed to it. All operations +// on the new handle will use the customized retry configuration. +// These retry options will merge with the bucket's retryer (if set) for the +// returned handle. Options passed into this method will take precedence over +// retry options on the bucket and client. Note that you must explicitly pass in +// each option you want to override. +func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle { + o2 := *o + var retry *retryConfig + if o.retry != nil { + // merge the options with the existing retry + retry = o.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) + } + o2.retry = retry + o2.acl.retry = retry + return &o2 +} + +// SetRetry configures the client with custom retry behavior as specified by the +// options that are passed to it. All operations using this client will use the +// customized retry configuration. +// This should be called once before using the client for network operations, as +// there could be indeterminate behaviour with operations in progress. +// Retry options set on a bucket or object handle will take precedence over +// these options. +func (c *Client) SetRetry(opts ...RetryOption) { + var retry *retryConfig + if c.retry != nil { + // merge the options with the existing retry + retry = c.retry + } else { + retry = &retryConfig{} + } + for _, opt := range opts { + opt.apply(retry) + } + c.retry = retry +} + +// RetryOption allows users to configure non-default retry behavior for API +// calls made to GCS. +type RetryOption interface { + apply(config *retryConfig) +} + +// WithBackoff allows configuration of the backoff timing used for retries. +// Available configuration options (Initial, Max and Multiplier) are described +// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields +// are not supplied by the user, gax default values will be used. +func WithBackoff(backoff gax.Backoff) RetryOption { + return &withBackoff{ + backoff: backoff, + } +} + +type withBackoff struct { + backoff gax.Backoff +} + +func (wb *withBackoff) apply(config *retryConfig) { + config.backoff = &wb.backoff +} + +// WithMaxAttempts configures the maximum number of times an API call can be made +// in the case of retryable errors. 
+// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 +// times total (initial call plus 4 retries). +// Without this setting, operations will continue retrying indefinitely +// until either the context is canceled or a deadline is reached. +func WithMaxAttempts(maxAttempts int) RetryOption { + return &withMaxAttempts{ + maxAttempts: maxAttempts, + } +} + +type withMaxAttempts struct { + maxAttempts int +} + +func (wb *withMaxAttempts) apply(config *retryConfig) { + config.maxAttempts = &wb.maxAttempts +} + +// RetryPolicy describes the available policies for which operations should be +// retried. The default is `RetryIdempotent`. +type RetryPolicy int + +const ( + // RetryIdempotent causes only idempotent operations to be retried when the + // service returns a transient error. Using this policy, fully idempotent + // operations (such as `ObjectHandle.Attrs()`) will always be retried. + // Conditionally idempotent operations (for example `ObjectHandle.Update()`) + // will be retried only if the necessary conditions have been supplied (in + // the case of `ObjectHandle.Update()` this would mean supplying a + // `Conditions.MetagenerationMatch` condition is required). + RetryIdempotent RetryPolicy = iota + + // RetryAlways causes all operations to be retried when the service returns a + // transient error, regardless of idempotency considerations. + RetryAlways + + // RetryNever causes the client to not perform retries on failed operations. + RetryNever +) + +// WithPolicy allows the configuration of which operations should be performed +// with retries for transient errors. +func WithPolicy(policy RetryPolicy) RetryOption { + return &withPolicy{ + policy: policy, + } +} + +type withPolicy struct { + policy RetryPolicy +} + +func (ws *withPolicy) apply(config *retryConfig) { + config.policy = ws.policy +} + +// WithErrorFunc allows users to pass a custom function to the retryer. Errors +// will be retried if and only if `shouldRetry(err)` returns true. +// By default, the following errors are retried (see ShouldRetry for the default +// function): +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() interface. +// +// - Wrapped versions of these errors. +// +// This option can be used to retry on a different set of errors than the +// default. Users can use the default ShouldRetry function inside their custom +// function if they only want to make minor modifications to default behavior. 
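+//
+// A minimal sketch of combining this with the other retry options (the
+// client value is a placeholder; gax is "github.com/googleapis/gax-go/v2"):
+//
+//	client.SetRetry(
+//		storage.WithBackoff(gax.Backoff{
+//			Initial:    2 * time.Second,
+//			Max:        30 * time.Second,
+//			Multiplier: 2,
+//		}),
+//		storage.WithMaxAttempts(5),
+//		storage.WithPolicy(storage.RetryAlways),
+//		storage.WithErrorFunc(func(err error) bool {
+//			// Start from the default classification and adjust as needed.
+//			return storage.ShouldRetry(err)
+//		}),
+//	)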
+func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { + return &withErrorFunc{ + shouldRetry: shouldRetry, + } +} + +type withErrorFunc struct { + shouldRetry func(err error) bool +} + +func (wef *withErrorFunc) apply(config *retryConfig) { + config.shouldRetry = wef.shouldRetry +} + +type retryConfig struct { + backoff *gax.Backoff + policy RetryPolicy + shouldRetry func(err error) bool + maxAttempts *int +} + +func (r *retryConfig) clone() *retryConfig { + if r == nil { + return nil + } + + var bo *gax.Backoff + if r.backoff != nil { + bo = &gax.Backoff{ + Initial: r.backoff.Initial, + Max: r.backoff.Max, + Multiplier: r.backoff.Multiplier, + } + } + + return &retryConfig{ + backoff: bo, + policy: r.policy, + shouldRetry: r.shouldRetry, + maxAttempts: r.maxAttempts, + } +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. +func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { + if key == nil { + return nil + } + keyHash := sha256.Sum256(key) + return &storagepb.CommonObjectRequestParams{ + EncryptionAlgorithm: aes256Algorithm, + EncryptionKeyBytes: key, + EncryptionKeySha256Bytes: keyHash[:], + } +} + +func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChecksums { + var checksums *storagepb.ObjectChecksums + if sendCRC32C { + checksums = &storagepb.ObjectChecksums{ + Crc32C: proto.Uint32(attrs.CRC32C), + } + } + if len(attrs.MD5) != 0 { + if checksums == nil { + checksums = &storagepb.ObjectChecksums{ + Md5Hash: attrs.MD5, + } + } else { + checksums.Md5Hash = attrs.MD5 + } + } + return checksums +} + +// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. +func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { + o := makeStorageOpts(true, c.retry, "") + return c.tc.GetServiceAccount(ctx, projectID, o...) +} + +// bucketResourceName formats the given project ID and bucketResourceName ID +// into a Bucket resource name. This is the format necessary for the gRPC API as +// it conforms to the Resource-oriented design practices in https://google.aip.dev/121. 
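+//
+// For example (a sketch; "my-bucket" is a placeholder), with the global
+// project alias "_" used for globally unique bucket names:
+//
+//	bucketResourceName(globalProjectAlias, "my-bucket") // "projects/_/buckets/my-bucket"
+//
+// parseBucketName below reverses this, returning just "my-bucket".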
+func bucketResourceName(p, b string) string { + return fmt.Sprintf("projects/%s/buckets/%s", p, b) +} + +// parseBucketName strips the leading resource path segment and returns the +// bucket ID, which is the simple Bucket name typical of the v1 API. +func parseBucketName(b string) string { + sep := strings.LastIndex(b, "/") + return b[sep+1:] +} + +// parseProjectNumber consume the given resource name and parses out the project +// number if one is present i.e. it is not a project ID. +func parseProjectNumber(r string) uint64 { + projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) + if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { + // Capture group follows the matched segment. For example: + // input: projects/123/bars/456 + // output: [projects/123/, 123] + number, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return 0 + } + return number + } + + return 0 +} + +// toProjectResource accepts a project ID and formats it as a Project resource +// name. +func toProjectResource(project string) string { + return fmt.Sprintf("projects/%s", project) +} + +// setConditionProtoField uses protobuf reflection to set named condition field +// to the given condition value if supported on the protobuf message. +func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { + fields := m.Descriptor().Fields() + if rf := fields.ByName(protoreflect.Name(f)); rf != nil { + m.Set(rf, protoreflect.ValueOfInt64(v)) + return true + } + + return false +} + +// applyCondsProto validates and attempts to set the conditions on a protobuf +// message using protobuf reflection. +func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { + rmsg := msg.ProtoReflect() + + if gen >= 0 { + if !setConditionProtoField(rmsg, "generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + + switch { + case conds.GenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.replay b/vendor/cloud.google.com/go/storage/storage.replay new file mode 100644 index 000000000..07ed1bfaf --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.replay @@ -0,0 +1,30067 @@ +{ + "Initial": "IjIwMTktMDUtMDJUMjI6MjM6NTMuNDAzNDMyMDEzWiI=", + "Version": "0.2", + "Converter": { + "ClearHeaders": [ + "^X-Goog-.*Encryption-Key$" + ], + "RemoveRequestHeaders": [ + "^Authorization$", + 
"^Proxy-Authorization$", + "^Connection$", + "^Content-Type$", + "^Date$", + "^Host$", + "^Transfer-Encoding$", + "^Via$", + "^X-Forwarded-.*$", + "^X-Cloud-Trace-Context$", + "^X-Goog-Api-Client$", + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "RemoveResponseHeaders": [ + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "ClearParams": null, + "RemoveParams": null + }, + "Entries": [ + { + "ID": "f5f231bed6e14b7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:54 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZvgYBWgsCwPaGI9bo1ccC0WCBc8kJgydTwioDtXR9xps4HiDoKXI-vjYUl876SMqF0JhmhaEBgvxrIL9Y989YCFrH65xGys_r1JbPdi9M9N0kS3M" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "9a9914424ef59619", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" 
+ ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFvRYrCleVqpn0QshSvzW5I1-8o7N6vGYh8o5G1f-AHnsX2N_x-NKJrvlxnXqm9auw5gMoWFaJTSTtKL5y85WlQ_eAjmmlrkD4tbHYBZJ386xgaZw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "17f2abbdd781a33b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYoTmTG5mxpFGPvmECUTlGMlQwhfmqGsZtBtZ9xV89Pw3q-p5BBeX_3imdofr_7EBT7nBm4v5alpg45Zi8a8ET28qBH2xfNe4n15HR-1fhGou2wQU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "6752f5a9a036af11", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4bFsk4ylv96GsTkuDKG--hVaCR_UEhZ_fAzMGt5Eu5ZKncHOLjU_f2PcNP9saFGW-UkH9jXwt_nuR0G2zXOBjMJmLdd7Ml61bGMMrJeVa0OtcGpM" + ] + }, + "Body": "" + } + }, + { + "ID": "9c25646df7aacad9", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq6CjN9PjjzT3LmHxW_tU_ciQ1rahetoQGbX_gX8EC5il7tPJi2yxi5VZxnDNrp1h14b7Ix8tnvtkHufAyO1-lMRutdHK5GSzonff78Nm6KPAKN5fU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "f795b9adcb1b546e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2-mWQyRDbFSpF6U96vQpaBYr74NgiUWh3-KZnLWaFYnhQti1tgKWNtL15YgK8blaRSnzGeACPA6jNuM34yhr7bxztrdN2tobEQAzD5RVgzpqx14w" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "2ee3f84c4e4045fb", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UplJEr-Hxa3hDFT5ozLEHYhHfaxlYFpc9Vwm8AL831-w_7BBgxHjjEsU8Br_uLnLes0h9hz37iuE9V8uVZ2liHY7ZD4piNH31oyapjCtwyXrukIP94" + ] + }, + "Body": "" + } + }, + { + "ID": "16f19dbf8e206756", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqvp5XjvB8nhNuz-bTeN9OklTfiBGldYKkcY13JF6oUfpV0z_jwoEQD3B3Ss3wWpaSmZfePjo7fkkr-hP3jbrUazNHaqQliiHqOBSNmSoPmwJpzfOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "949f5ce411d6f672", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + 
"3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpVeVcmmuUyOt3Hbja89_Ewi6GRsJtRduqK93OT4Ys1aK5GqDWeGxyDbczUyRLeUYvZgtJzYLwVOOUszqAF4ipSXgZ1L_byd9cJ7ttVfQ_ceQBXxY4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJva
mVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b75303fbdafb66d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "64" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDnNlC3m95GplHjE79aqzhtwgfJCWQjHaCFG4i7qmTFliz2gdE4OiOnKAPIoNqxEngE35065YXNYA65aMSSeEluDKmQ__rJXcS_DpRdoYP4rZIyPo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OC40MzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxhYmVscyI6eyJlbXB0eSI6IiIsImwxIjoidjEifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "831805b62d969707", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiYWJzZW50IjpudWxsLCJlbXB0eSI6bnVsbCwibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2495" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8yNb3V9Kq8zPa_pdVrJIYv83v4fu6xAHwktfTz_Cy4K1rpi8xrDzYmw5wICaazfMcAiYPhM8r4Y6WkeOyeCRInvkJ6ndduhNN_dgu1U59uI5F3Qc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS4wMzJaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InBy
b2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "8a816d061fe9e7e0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "77" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbjywEmDPkqxln7-Nx_8ngRxoWvncfCx1fGVpPZGEjjmg8OJgv0uaczxapjlNeEcvMnqWI_RVzG6_588QaO8nCnPVnE2kkIek3D_t6UNL9CCPYLRc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS41MzBaIiwibWV0YWdlbmVyYXRpb24iOiI0IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FRPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVE9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBUT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVE9In0=" + } + }, + { + "ID": "6a60397ebae323e7", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiYnVja2V0UG9saWN5T25seSJ9Cg==", + "dGVzdA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UokOvJnDKKHAQOa8mJLrxFpWWvQ2U_BC_3uI0Z4x870Q068evHio_t_YudbSq614h77-ofhBsyHpoknWnm_YrnXxkHzopreKoMBykFIcbsSB8TDKEE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRQb2xpY3lPbmx5LzE1NTY4MzU4NDAwNTUxNjEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5IiwibmFtZSI6ImJ1Y2tldFBvbGljeU9ubHkiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMC4wNTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDAuMDU0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAwLjA1NFoiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJDWTlyelVZaDAzUEszazZESmllMDlnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seT9nZW5lcmF0aW9uPTE1NTY4MzU4NDAwNTUxNjEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b
3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJocUJ5d0E9PSIsImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0=" + } + }, + { + "ID": "32d392d1da32f27b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl/user-test%40example.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "111" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], 
+ "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoau0xgFp6ib5wM0bBWjRlklvDRPOu0VZ-LFCeENUWXutmkXSfgUbtr2Nuefb7Pm_yLvCNqtB9B6k_N1V7AlvkN4_JEz67ZSXQ_sAD5L1teQIpGiqA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InVzZXItdGVzdEBleGFtcGxlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9" + } + }, + { + "ID": "6e3d0eda38a3ab6e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "59" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6dHJ1ZX19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "663" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpSPFJHLzusxOr_OvhStJlWEpOs2EuthMO0Ys6pS9bsQeP0fthp_VUZfa8_sN8TX6PJYpxIdFlxB2QUaIujot1cUrssoU74XFrAwoqhlmiE5y9Aw-w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMS4yNDJaIiwibWV0YWdlbmVyYXRpb24iOiI1IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOnRydWUsImxvY2tlZFRpbWUiOiIyMDE5LTA3LTMxVDIyOjI0OjAxLjIzMFoifX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FVPSJ9" + } + }, + { + "ID": "8f8dbb687dc49edb", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGpW2zLlN7nAgV5IVkKU3kx4QWHCkzAgMa-QPC1PyCol8CP9W605bMUmFMeerbR4enzmeNMvtb4a2HzBPUZ296YfGdtkt_6Guq82E226xzC5TPq4w" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"invalid","message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.INVALID_VALUE, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, errorProtoCode=INVALID_VALUE, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., unnamedArguments=[]}, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., reason=invalid, rpcCode=400} Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only."}}" + } + }, + { + "ID": "42ce6421b2c9fae4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13358" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJwxja3nYzWYbg_I5gWvOiow2ORuo8tNA-_Vzw7DX_YVhhb6_p1giUk3WjUHWt-lyDA13adhPGi4BDfIXzQyf-ZL3lsHoa2sNm29BIqRznw4mkAdE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., unnamedArguments=[]}, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., reason=forbidden, rpcCode=403} another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly."}}" + } + }, + { + "ID": "0ca0bddf513110f4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "45" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnt9fX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "624" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpdDWPDU9-gPzHEieE0Rqx_40yf8fJLhwAP6fVsdS4F7I7sWj0h-Ti7VoDWciZgI_lgNUB7qyh08wjTAxrTLTsSiIYt2GR6ksxhDRupMoPWni5kXmE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FZPSJ9" + } + }, + { + "ID": "8a8bee740102593d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2964" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq59Mf8Ea4fgpDnUzupeIP3bGt3VpyI6HjL4KJtDKAD_h-Ua-AJSX3u3x4TCsx2MZcIVhMs9pW9SWsrIcsvsr3kGt2Je9W87LElbN5dlw02EItBcR4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LT
IwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJidWNrZXRQb2xpY3lPbmx5IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDAwNTUxNjEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwiZXRhZyI6IkNQbXU0Ym54L2VFQ0VBST0ifV19" + } + }, + { + "ID": "91ba2c22c5f5de9a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrwotKay181GHzZsWfp6BzJA4FDOIfK2s1WlzB9p8QsIEX42AtvMhkgLWqSIyEhb-MSv9snqx0WRwUs5sDN3_5NVoFTzQeLkDBR9mbsixI-udc8SLI" + ] + }, + "Body": "" + } + }, + { + "ID": "43fec6c3a8c8cb63", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY29uZGRlbCJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3161" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Etag": [ + "CNHLq7vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrYnzZRz4GFV3BsHoF-vv9v2Lc13Wp6-P5iowSZFjqyykVhYZ6CkesIVxA2v2xNCbVeWgCBXCFFt9fje73cis1cECwwqrYLr5QF5bZCy4TsJ-LT8k" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb25kZGVsLzE1NTY4MzU4NDMzNjg0MDEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsIiwibmFtZSI6ImNvbmRkZWwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMy4zNjhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDMuMzY4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAzLjM2OFoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbD9nZW5lcmF0aW9uPTE1NTY4MzU4NDMzNjg0MDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbmRkZWwvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbmRkZWwvMTU1NjgzNTg0MzM2ODQwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMD
EzLTAwMDEvby9jb25kZGVsL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0=" + } + }, + { + "ID": "ba03d9efb1402903", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368400\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12249" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-mcvaL-eBgBoviwBg_r8LyEEK3JNyFaj0cFy2neOMo1pVkWpkGQ-3gz8vQNtAGoX-Q7_CMYLNv_I0pOoy5iRr-MdYKny5ICdC1s3ji5DwL7sqmn0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/conddel: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel"}}" + } + }, + { + "ID": "e26af2cd4673cc7c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationMatch=2\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12051" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uopfd5TJBzhGauzgyZW0h-TNFtDHz34k0kjbeuTeQPnMGBaiSFz9FdWA2gxp7qKp-V586voh7kqHgnVSW_QI4bWEuC35pj8NbCmmpNL_9pstpCgckw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "857f8a30eb55b023", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationNotMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 304, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uprv3QMjU4zz49ScYI4YaY9FitNwO7wGMQ1KZ8Y99w4D7keznFfA8M80bMdKvSH55jsWsDoLKcQ1VvIqIEUw88NqSfUM7E5SR_y5zfDaCf_uHSlgPM" + ] + }, + "Body": "" + } 
+ }, + { + "ID": "4c69a7dc19302935", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368401\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoiZa1zqMV-8ZkeUrvT6Xi0uJul6yGWWth5s3YbFtA2p0-6vc_54sNtEbxbnsASZyMDXLelHaCSixgcVT6JDVELrXHDim9gHcjjlSTF6BsHWu181A" + ] + }, + "Body": "" + } + }, + { + "ID": "834f05b1eb2dbc32", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMSJ9Cg==", + "TuDshcL7vdCAXh8L42NvEQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozIFQVqWdhJxDLMyV7dfkeraSOw2Mw_AsI_aCWnAUudrz4HjQgZ4kbnvKXJfNF3SMQhxzxZHhk1Otmw7PgPxL7HyDkPDyK1DeHDc8GyfY1B3Q3YfA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "34a1730e8bbe1d09", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMiJ9Cg==", + "55GZ37DvGFQS3PnkEKv3Jg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVrbP1vUH6TU_zfm_Aaca624z-91MoULnLIBcn9Hg4htv5T5Z6B4XYJMFZUuDjWWeBhR6EpG1UZL4iJe0TJybYQ2CCsB_k63CcOex39xaOIT8UYUA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "b7c2c8ec1e65fb6d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9Cg==", + "kT7fkMXrRdrhn2P2+EeT5g==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrEgFAbU2gAqyKMITI9vr9kAgIBfNY3ZGs1l2_X4e-fVtNb01M9N81N-83LdChVzrk8iB09yuKUKdxRc0nMYrgy7S1fqPwbJdsQ6TJfMB05JJj6qr4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "0dbae3d4b454f434", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk7CzWGAqaT12X_fTymveyPtsPJIPHD814QaAOIQLA_AJo_4OtnQOOXJM2jJhIZ1Co4NgEmboDUG9su4SN5fDTHObNh9elLts9VpUJ9iSYrIsn-Os" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "3793882b91d38a07", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpzUH_BRjU20OGbDAW2dy0lx9Gk_1Ko7zks6KG6OEpq0pBPfCIBjkCeIZTxKxtvxnOd1hqlZF7SlbNoyNUqX5UFXpZY5NjP5GTeUkm512vaBR5vnDI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "39924c2826bcd33f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgOr-dpWoY-JJi6vRigJX3Mpi_WQ4zWvWKK382DCP-mzWSnrpbaOzijhTCdNz6pmXskVVs30APwlqZgvb-S6xAwXqKJG5n6832Py4UlTdDR_INTew" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "89ea9ab5e21a635e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqj9dj5PbUhIU3baBv98vMViDP-BnVPs0APrFYL4jSbKc7fK6eAGoyDfsccGzxK-t0lGvozizW4ltrga8DXo_oxlZ9-k5a85v9i0PWTFIisPC7xuL8" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b27c106562362f6b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIo3JJa2rfg9_o3bXN_5QU6XU_iIrWYfuEuvKTQL0O5tjIHkODIDd4biyHxjbGNFnrQNbkiEUXWEBlo-3fTVgfFTOWuaPpgae-SafTBP8h5X5ddJ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImV
udGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "ca654b46531c4cb2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqINI0Ii418NlxoYErOrjhTKb3-G3n-8h1Ryc_4YT4Tksc7WXA9m4CcJWInyhJahYC-UUrM47O5EK-3FUt3toZOZxWlbwhE6Sfnra0H-CqjmvxWyRs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "0186889a60e651db", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFTK3QnFf4m9htpY4s3P9_NHuDiQl8InpJoWPOaQHo0XaYnSoSqkH7CcUbm0-sWamsCZbd4DHoXHLCrea3ff9rLpWlptkL0aMKV_Dleb1Xm-j14fQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "a253776f79c46fe1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoadifz-4B7kC7DvXOyrNa5omK5-DXUNJBtT_d5I1jciPUvGlRm1L1vkvVMU1Y5n9zFig2CF_qqBLlcdvoYCaUeaNUu706L0fKJhJkA8vV5JUvFq0E" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYm
plY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "dc8959751769ee9c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLeV9n2aiAq15pl3CqEWOt3_OYHsxbG0lHFIKr7Emh5btUEXLXl4nZWPi27LinKW7i0LYvp-HZK5b9E742MR_PNFe87treTpz_QgUmchGPKLcdZVM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "23f7f167269db6b2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4hY4jSbETgB5jf-AU8_b1sRvfTSrlt_Pt6UXiuTte4GtueVBgDwuMEliwE2-nOSiRE6juXOCbR1LQlRrpek1TLeRRf1cCVbz90YcCIGhWV_zFz0o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "518c5855f7f2035b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGaiKkmr1oBduBvG8APGBVoM_Ve7zHw4TBuyV3QYcFr9SYzEnATEwE5P6BH-yBsVXSmaRLej1a55x18Bra_ZAC7nYh2UKvWM66JGE3U1muaDBabyw" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "5887398d8cd8c906", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGAvr7ojM3PcmnrTMTX-TXgYKzfo-3gzwYU_l9OaCfpth_ad2lWUJf6w6B8pjEGsAFZJvNueCHYGSJdx6YJ7NAe71oah1xi6lGQvQkWEwAl0fXd7Y" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "81d53e304c7ed90d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur9XEMeCefUS1K7QkIxrxZboQ1zQ5SkrEUZMzrQdNBNAuDQnpDczXzJvF-rZmxFVYSdMySScTTu-FICOdI91b-fQVRomrqAxJwgn60FoObIVVpgkog" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "0be1bdae5ba86bfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UocXesX_BUCirPixQFUhYfAIoR0Os309HMGHarzTGrqH1PkmHuGaaiNSH_pJq8nnWUXEnjk1cvuPGJWCF21t7EJQtCIGBWQoPBNV6OpvALZGdpPXuI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "592a3fdf8b5a83e5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoS2FM55Dkg1PRaNQBzsu5KBns-svmRrQ5byrUGRHgnsBQQDdE3ZznljVTrNq3wDAClddQ4zbCMQSLdqxPPNlom-n1tMg1hO8s6kS8_CTWy2SOM6As" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b01846866b585241", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo22tWMx8uW6afXbfFrFT8UZzPbsqPItYMdRAPHNEuksgeqWI2US_xblUG6C8WgcHvfEeR_19eVmBTmW6QE7rmosaIm_raZxW9FuCHHiSi8TJnZ1CI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gs
erviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1
","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "2eeefee032c6e805", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + 
"Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq55nB00t16AHTo1gSxBFUNplz5SJcGYBOeCPsK-MD5OLO_kHKT3YNZq69AAHpozaWVrTU_jDQqiqMFSoPhhmlF2dR1mcq6Ihk_y8uuFY6FM4O_hmI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@d
eklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gservicea
ccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "11bfe17bc978cdfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotgN85lDspseXEgwnXhmfSmio9oOxSzhqtyabApVH0KQz_aVg3DbQ7m0Z0L9SzPzzmNEUfU6xhAT5xQAXf-wvY1NHDFxdf9IJ6YoVNhN0aHN-75hY" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "c05bfa6f1a43381b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + 
], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVVjXVa2SMY6cjYeIgdcZOivQwE2Crz6UPZs2VXVBFMwReZs7kKbETuTMwuEMHKKm_LIrk-o6jS07MimubSWVxGMzT1BtCacU1X2Go_RckmyJPZso" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "f2932604385db16e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrADckAOw2vMHdy0qy02f20x-8s_Ue81-NYwSpKeqp6Zx6eCPReLM6qXUyxFz0xHNGob5WJA8J4ctl6ZFzL2fEVwBMNY8xxzpkKOAwSu9n9e0OE63E" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "8581afc6216ad2cb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + 
"success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrkoYpGTn57WC5t-sItsEdWimp47AtSuqw8autFOD3TirChrdQThg_Fh-kykfgvnTaOyiw1InKeYs0Z2MISmjUpu4uHUUGDKTX6W0zQEuUks7i2BqQ" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "ae129e597d5dd3e8", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoaoET13zBO6-kmfarxy5tCZhmqc2oO38xOd6m6OQ55e-MyyKYM35hNakm5xYdWaoUNsKwVCiklp4HtYE8afVObxcDERuTCgyTw-olewCN6VBwd-H0" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "11acf26dc7680b3e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up_XtsS2bu4fnGI-Z85gqz1MECYqweRqin42arAyEfpJpZwmadI1-Op9V7n-q3UaYyRtUWFR7an7zKxWhJ_CB6PXz-C55C1nPoI7EWVEV2oXcS-q8c" + ] + }, + 
"Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "ff72204b0c538268", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrjLdnTX47-_XGNBlBZ0rEgsIkYJMzixBaVmhjn8IsK66UpQAUO-njMM-WznI-DBNIbXVTcPQKZBNe2ZHZEskfuRjsFwMjtooyUH_PqTfZnARVv8M" + ] + }, + "Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "7972502f866e015e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-15" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNiJIZYFETpngMkDItRU7PtN7Fv4bDU7Kfvndst1JR2eGmW-tOMTfng8Abx6EWwPvvodLkSk-1TM4Gywxzh3c24gPDN4NsQMuh40Mfp0Jvg1syOls" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "251a62254c59cbd1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + 
"bytes=0-7" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 0-7/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UobLFpkqodTM1bHuzS0GzRCKZypPQkf3nH1hbOexCjgjAcheZhV_zxJRyLhOYZoW8ABEaVMJLULNrzAm1VZs4JrTdtT46fYIcQe7OBeJp0aHI7Lr5s" + ] + }, + "Body": "TuDshcL7vdA=" + } + }, + { + "ID": "2482617229166e50", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-23" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uon8ZtKgOTqFm36bWHd3hf535jPGQBEX9j2yPrr11_ycAJjfI4A-mFWMTsFRR3nwSo68784B9TUPUjQd0cib0QIrfBjfDcz91p6oPJG3VpV9afywIY" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "74ff0c5848c7dba6", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + 
"Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKc-08ZJEWVbyaV84xKlnrHkl2PBuBUQP6xtx6TzZW0fsjyZ-SHshiy-QMGCuP1UF4x1ettHyDvRbAleHIXy4y7wMwoUWST2NKs_0oRA0oVqOoKtQ" + ] + }, + "Body": "" + } + }, + { + "ID": "90888164f5d2d40d", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTSlg3r_LSoiqQyJVJDzL-0xNj6jlSNp9zLUmlPtu3xbhCLBDNAxY9CRbyEjw7UudDcsFOe43C3QlsVjoBJln1q179ZspYWXY-fHqjrztAfJkZUpU" + ] + }, + "Body": "" + } + }, + { + "ID": "7961f3dc74fa0663", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], 
+ "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJjC8HFLb8u_EaJVF0_TUOhNWhbooW0oANE8_LPjoIdLcoU42caZe7LjgbTIjCb0Ss3horP2noxirF3u3FGq0Yoh4rjuHBVhwZcemZHnKLgJUUbQQ" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "af8c1fcfa456592a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-31" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqq6ro5-i6q8pIDup0yW35uFEynuLK95P8OSFhj7b70DK33tjv3fkOdkobsfwplOAGuNME2bLjQuyy8mhd2xBbNjyjvXt13Nc48PB4M2t_46RoM3Ak" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d312c241da5bd348", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=32-41" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 416, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "167" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 
01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAvzLs-GgdTPEGM2kvABFTxehSb-DhZQ2Y31nQ9ad0RMmfIMtVGRBXvkXo7FwGAy78ZHMSGWRKG-DYMy6JU9PCM4Tpo8PDmm4-rHa_LVpH2dFN-JM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+SW52YWxpZFJhbmdlPC9Db2RlPjxNZXNzYWdlPlRoZSByZXF1ZXN0ZWQgcmFuZ2UgY2Fubm90IGJlIHNhdGlzZmllZC48L01lc3NhZ2U+PERldGFpbHM+Ynl0ZXM9MzItNDE8L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "2bbd010d0173c966", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZDScoUU4zzBoXc21jt6C1vcW1SrU6ELiWodrCID-OoNKVMbecv_DSgHZATQGs4GS-BebkzqdalqTq5C77aKXQMxiks-9A5kyrc8N4aY9L5YndaI8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "e3522e1cc7aef940", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5x_r1PKMQrnKbsfUWRR6wBwFQTM8U9mGwyq_fK56rrHE_UoLDxLRpiuaFGLVi9L4Hj67UXH76drA6KQQQgOsMVo0YEl1pVS2P7OKQ64WF4NubRCU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FZPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBWT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBWT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVk9In0=" + } + }, + { + "ID": "77ed46692038573c", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpBnZgUt8GyAer5VG-_nEuf3YizRjF2t4MS6vOflI_Vid1-zVpQ99TuphB3pZjPZNI5ZoHJxCmDv3lyLwyhUDvk_p_NO9x8qZTEpjf1EOO5uxRsOxo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDE0NDc5MiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC4xNDRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuMTQ0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0LjE0NFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQxNDQ3OTImYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF
0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvcHktb2JqMS8xNTU2ODM1ODU0MTQ0NzkyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In19" + } + }, + { + "ID": "9187dbb998c64d40", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb250ZW50RW5jb2RpbmciOiJpZGVudGl0eSJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3299" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpyrF7U9KSc3XcJb-T_KGf3Fg2yhFUjJqTl06wpqqhG5IT6chgLCWKnLTuamfHtVq8XcP7acZy4TGyKIEvZ4L36TGTfSM8Zp_JyF8K4rN5p375VBMc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDc0NzU0MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC43NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuNzQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0Ljc0N1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQ3NDc1NDMmYWx0PW1lZGlhIiwiY29udGVudEVuY29kaW5nIjoiaWRlbnRpdHkiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQ3NDc1NDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yY
WdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkNUNmRUQT09IiwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "c199412d75fadc45", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "193" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOlt7ImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9XSwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm1ldGFkYXRhIjp7ImtleSI6InZhbHVlIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2046" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKO5A_yihnTzTSNbXxyLm-I3MkyC58APsD1U6RZ5_xpjjjL3nXVZIyACkeiDKXRZyU_oP1Yt0FeX23ONp6JeNyoy0J0kW8GwGMgPeN1ATdWGzCMXs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS4yMjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJtZXRhZGF0YSI6eyJrZXkiOiJ2YWx1ZSJ9LCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiIsImRvbWFpbiI6Imdvb2dsZS5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9" + } + }, + { + "ID": "c845025158c6b7d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "120" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOm51bGwsImNvbnRlbnRUeXBlIjpudWxsLCJtZXRhZGF0YSI6bnVsbH0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, 
no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1970" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryOVdmGzCqBOS2ZZ0nw1SqbHF7qJ1CeTxgb6a1BJyoA42NTNZ8RdEVQGNF5u2hWdSQW79cBOonxjms_MoogyDJVAtKHl1rys87QHF6-750NNNSoEw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuNTI3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0=" + } + }, + { + "ID": "811b0b9d4980b14f", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY2hlY2tzdW0tb2JqZWN0In0K", + "aGVsbG93b3JsZA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CIGhrMHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJTh_P91pby-vQkRF12GexDihy7TZAlUPamDV_REldvXeqvbrWY_kB0Vcp1lYyuIY7EK2_eMBYYSRMMIVEYz5fxxt51-9Br_UmYvnuRgjhfC16AuA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jaGVja3N1bS1vYmplY3QvMTU1NjgzNTg1NTk2MjI0MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdCIsIm5hbWUiOiJjaGVja3N1bS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE1Ljk2MVoiLCJzaXplIjoiMTAiLCJtZDVIYXNoIjoiL0Y0RGpUaWxjRElJVkVIbi9uQVFzQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdD9nZW5lcmF0aW9uPTE1NTY4MzU4NTU5NjIyNDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY2hlY2tzdW0tb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWlud
GVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NoZWNrc3VtLW9iamVjdC8xNTU2ODM1ODU1OTYyMjQxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jaGVja3N1bS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJWc3UwZ0E9PSIsImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0=" + } + }, + { + "ID": "69c77b8bdd7cc643", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVyby1vYmplY3QifQo=", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3240" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CLirz8Hx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2iXVwACD0yFKYjt0WT-lW1Tx6PtpOgDYttPBWBnJZ3CPf4cUK7heI_9SwdzYXoieaRHDj9n3w3M_SExedwQqQ-GTOY9qM9DPmrPy11hEny0WjECc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QiLCJuYW1lIjoiemVyby1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNi41MzZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTYuNTM2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE2LjUzNloiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3Q/Z2VuZXJhdGlvbj0xNTU2ODM1ODU2NTM3MDE2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvLW9iamVjdC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsi
a2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQUFBQUFBPT0iLCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9" + } + }, + { + "ID": "1017883279ca634a", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "98" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "417" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKqYaaFUj6b4ezS4lnnQgYv4CHVSsqhVSlLP3QqpItNahh25KnLzbTccK_idrfjKV0gExzr17MvFmoRw7oUIWyJJINmpEXEw5EmHScxipsb3KbWZ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvYWxsVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0xuUytidngvZUVDRUFRPSJ9" + } + }, + { + "ID": "35f04bf2400be96f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "4" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqC9teasrZdeHJo8KLaQQ251d02ORMxXreH3TKcaQp4NWLVbpiAw1GpW9WeSdFgbpWtVDdyzfjE93gAs6uA0kdlDPZsIEfJVK3GFfaZE2aW5aFCn8Y" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d01040504d2a91c4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoib2JqMSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30343" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRhLZOcm0K7eNByUzCBgYfgiytuZ6Hj06jPkbkK77LS80OPgO_78AL530-2AcrlLe1fIy0jM0tonLLncLuOszrqKcGgVhqEJuE48-67vqBRACjqmY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1."}}" + } + }, + { + "ID": "db327c92dac99584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoa0LrEhOIeq-iYERbwsOkXjr0QR1ANe3tDsleUqoFNU7fRIruaqYikdnU1svzhc8iGRwx0A5dAGYQ_mM045LN_oxU1c76ugXtnWS2PW8wBcmst7hk" + ] + }, + "Body": "" + } + }, + { + "ID": "e70c028bd3dc9250", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12275" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrvFq18HewCbAkJcD7BmKgE8_WOFqSNkcwbpbNqCDPIy5jLVf2fIaHdg76_1rHuAHTtFM84Zr6euptOehRZOqA38Zc_BrJUSZKjg686imvazehpBc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused 
by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "4d7120b41e583669", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoJ1cGOQjoNiRk8qv-igyccVp3Vg_ut7NLSEO3A-yyQwgGnoXR5jPbi3tuomYWrS57GIvd6GpShkcAYSIJ2e94Jx_1SseYkYtlSF8a7qsDU0OVFYgM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "e7711da9b067a58f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "156" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6Im9iajEifSx7Im5hbWUiOiJvYmoyIn0seyJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "750" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CI2048Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoJ5Us6Rp03fUUqSAPFFGPuqW2qR4gCfzJ4w3W1Kx8C0JGfNDxte0QlBoVEi4K7rV5zkUY1IXQvIk6FOU3DxP8ECZdxkVJBW57Jf5iXo8kRyLVu7OA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDEvMTU1NjgzNTg1ODk2Mjk1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMSIsIm5hbWUiOiJjb21wb3NlZDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1ODk2Mjk1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOC45NjJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTguOTYyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE4Ljk2MloiLCJzaXplIjoiNDgiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29tcG9zZWQxP2dlbmVyYXRpb249MTU1NjgzNTg1ODk2Mjk1NyZhbHQ9bWVkaWEiLCJjcmMzMmMiOiJBYldCeVE9PSIsImNvbXBvbmVudENvdW50IjozLCJldGFnIjoiQ0kyMDQ4THgvZUVDRUFFPSJ9" + } + }, + { + "ID": "de5d78de4310ff60", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CI2048Lx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:18 GMT" + ], + "X-Goog-Generation": [ + "1556835858962957" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqprnxhGEtAMeMcwiKn43QTEdsHrB7k_jCjOqnvH984bk5CIjIhSalrDVNwlLb37IU67jIYyIY1Nq8uaSz5HGUROWwpE6SVmGvIGXnuAbTP7x8Ez5w" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "4b7c14a3f35089ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "182" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvanNvbiJ9LCJzb3VyY2VPYmplY3RzIjpbeyJuYW1lIjoib2JqMSJ9LHsibmFtZSI6Im9iajIifSx7Im5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIn1dfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "776" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CP+RiMPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrMYc2wH1kAYUthDQQhZ4gGQU6kYXq6KGs1msu9ifujWYQ2dwS5ifqZFWiTYf74Rq2y9VkzALfhjkTK6anDL-SwHxUH3IwMe8j1rrrbaMbv7eLQCR8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDIvMTU1NjgzNTg1OTU2NDc5OSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMiIsIm5hbWUiOiJjb21wb3NlZDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1OTU2NDc5OSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9qc29uIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE5LjU2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOS41NjRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTkuNTY0WiIsInNpemUiOiI0OCIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb21wb3NlZDI/Z2VuZXJhdGlvbj0xNTU2ODM1ODU5NTY0Nzk5JmFsdD1tZWRpYSIsImNyYzMyYyI6IkFiV0J5UT09IiwiY29tcG9uZW50Q291bnQiOjMsImV0YWciOiJDUCtSaU1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "26a4cb091ed2f1bd", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "text/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CP+RiMPx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:19 GMT" + ], + "X-Goog-Generation": [ + "1556835859564799" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-t-yqapXjBzjIrFWFhaT080f06JahbskCp0DC_-izUjPK4fYn556m2qyRDgj9HjkkDPtKR6dauzF5afjNZZ61bN_dTvw15d82A206-SdS1Jv_YM4" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "6b59ed7d3d098970", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwibmFtZSI6Imd6aXAtdGVzdCJ9Cg==", + "H4sIAAAAAAAA/2IgEgACAAD//7E97OkoAAAA" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3227" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Etag": [ + "CNuJssPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoXNmswaWh2Gfcz1pYPy7etwk09Nz_dmuYVq55M2p3ox38A3mC0QgoHf-hL1uCZxQvusJSIHeugTQvmCgZvRdXV-3juD0W8KI669m0EILAHSfnVoJ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nemlwLXRlc3QvMTU1NjgzNTg2MDI1MTg2NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdCIsIm5hbWUiOiJnemlwLXRlc3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24veC1nemlwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIwLjI1MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMC4yNTFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjAuMjUxWiIsInNpemUiOiIyNyIsIm1kNUhhc2giOiJPdEN3K2FSUklScUtHRkFFT2F4K3F3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0P2dlbmVyYXRpb249MTU1NjgzNTg2MDI1MTg2NyZhbHQ9bWVkaWEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2d6aXAtdGVzdC8xNTU2ODM1ODYwMjUxODY3L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nemlwLXRlc3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYX
Bpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiI5RGh3QkE9PSIsImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0=" + } + }, + { + "ID": "070a9af78f7967bf", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/gzip-test", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "none" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Type": [ + "application/x-gzip" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:20 GMT" + ], + "X-Goog-Generation": [ + "1556835860251867" + ], + "X-Goog-Hash": [ + "crc32c=9DhwBA==", + "md5=OtCw+aRRIRqKGFAEOax+qw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "27" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosPALnXk-3j5nBlDDWX7hBP4xBNcCe7lGNkEU-sHHTWsRexFWLoB-1gn8RfnlqS6Q5ZTrR86NBxpP1T0bFxgHnlYUYGh-G3mThRCgGIAWjXggOeOU" + ] + }, + "Body": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + } + }, + { + "ID": "3ece6665583d65fb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj-not-exists", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "225" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uph0zmuYk7ZmOWzbNkj24Iai85wz2vKj0mMnMkIHIoDUlDAUCA0e_CgFT5fV_XAvgUx06I9FspomkB_ImflSVCvj55GK6zEoY5iV1hN96nh4AmPBN0" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+Tm9TdWNoS2V5PC9Db2RlPjxNZXNzYWdlPlRoZSBzcGVjaWZpZWQga2V5IGRvZXMgbm90IGV4aXN0LjwvTWVzc2FnZT48RGV0YWlscz5ObyBzdWNoIG9iamVjdDogZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai1ub3QtZXhpc3RzPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "602f4fdfff4e490c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic2lnbmVkVVJMIn0K", + "VGhpcyBpcyBhIHRlc3Qgb2YgU2lnbmVkVVJMLgo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:21 GMT" + ], + "Etag": [ + "CNat6MPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWSi2VcVAHJJzhGSPjVT17kZYaCh3uZvWXBU0R4hVmPZJ10gvbv_Ucrfd2kwrBvRYoZaQcYkY5Q3M-oywiCNtLm6SM9InWHz2Omc-0pWJJjfQWPCM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zaWduZWRVUkwvMTU1NjgzNTg2MTE0MTIwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTCIsIm5hbWUiOiJzaWduZWRVUkwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMS4xNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjEuMTQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIxLjE0MFoiLCJzaXplIjoiMjkiLCJtZDVIYXNoIjoiSnl4dmd3bTluMk1zckdUTVBiTWVZQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTD9nZW5lcmF0aW9uPTE1NTY4MzU4NjExNDEyMDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc2lnbmVkVVJML2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3NpZ25lZFVSTC8xNTU2ODM1ODYxMTQxMjA2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zaWduZWRVUkwvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2
xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJaVHFBTHc9PSIsImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "4a1c9dc802f4427c", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbFZENfdiDkKpD_H_zm44x7h5bTZ3VD6MEoiiXuMwxHHK-zCSSoPRA2ALNUXoR3EcS1c-huuyZBIiw7ULKii7BNqKz0RMu4lWRxrDRQBOjGwKDDgE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQWM9In0=" + } + }, + { + "ID": "e12cb7360f4fd8dd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVJI9g41JQEDEgaIAOXKSCe2B8xklFKSoIciM7uC0uod7t7NzXfxsIrI6mQeZGkWa2B12xdKpSJBndrNbBvygZ6aXxpBBvL8d80NJ42u8KQC7AaFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWM9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBYz0ifV19" + } + }, + { + "ID": "d25b1dc2d7247768", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMSJ9Cg==", + "/pXmp0sD+azbBKjod9MhwQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosBTMUgCzCYlGDKqRCN7Kexl832zyi4FxoRqfYtmTr8yv63z4BAQBeCTLajyZqMbRyzjje7TtPPYomUSv_8zrQ8bFdXlNzbTSeKK8kXi2Ys82jus4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxIiwibmFtZSI6ImFjbDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjI0N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC4yNDdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuMjQ3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiIwRTl0Rk5wWmowL1dLT0o2ZlY5cGF3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMT9nZW5lcmF0aW9uPTE1NTY4MzU4NjQyNDgxNzAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkZpRG1WZz09IiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "1d3df0d90130f5fe", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMiJ9Cg==", + "HSHw5Wsm5u7iJL/jjKkPVQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "CJrxw8Xx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMl0_NB41LujALQWmul06CCiuw7okuKlhYG_qilsoXlld2qoYgc2-ABYogriA37QOJMK5ZlJ5sYzP2Mp7CTSzK9uccFLi10TdrGFMUjUpD0OvcXrE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyIiwibmFtZSI6ImFjbDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjczN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC43MzdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuNzM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJjOStPL3JnMjRIVEZCYytldFdqZWZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMj9nZW5lcmF0aW9uPTE1NTY4MzU4NjQ3Mzc5NDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0NzM3OTQ2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0pyeHc4WHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDIvMTU1NjgzNTg2NDczNzk0Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkF0TlJ0QT09IiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a270daf4f72d9d3d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2767" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UranlihhkPcnPzTT2UYs4r6Hritk60lULu-pzO6YX-EWIlMlFiXzKmxaZVgOcPfnCgfJsRktKsL_TbFun1PfupraBREfYklCvRXKvJ_526gW-Jf9zs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZS
I6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "328125b2bc991e95", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosP78wuW4e1mPLJgcCbCxBC4Je5LWjKugd0fVK51u-P3qeXbQXj4_Amo25ZHyut2LZvoipvmK95xEvebSyVyzVydKyU2VSbHLDkwnRpR-nr9UGIf0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c7595376917851f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:26 GMT" + ], + "Etag": [ + "CAg=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqnZXiLrdCFoctnkKUqdvXLdSp9xumFk20gsRJc4xB1CTZ14aLZYwbDJmi90E28Q2_E4klaJBOaB1OCcpbDBoF1N9E9Bk3VEaNYYrHKMoECuO7RMPs" + ] + }, + "Body": "" + } + }, + { + "ID": "4183198b151a3557", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "109" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLWpiZEBnb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "386" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:27 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo361-YkcyyOvd5CL4q3LgTM1Bk4RdkPY3cn-qOm6Dn0vghTnmIE9VKkzGiOnjRNnD7SWXGMqpxaCGvCt6P44D55PqOwAtVTF8PNwmEK9HWCoVXABo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In0=" + } + }, + { + "ID": "e98f53a71d8839c9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1789" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqACfvLjoBUrCN_6ksyMdPlv7yPWQGo1D9u1UiCCgFDRlEwr6NOp18BSxaeTuMIabf2ciNj7_Ba1W6YCz64HlcYyQOoEXNfGYGKcAqGTubXsGHPVk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FrPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In1dfQ==" + } + }, + { + "ID": "28b9a29086758e65", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UozsFdtI43Ltv91E18-CrdRZ4pQDVZXReJOVhpWDu3mxNA7rrWU7hNGl4kPMd0FgcVWDigZA5JBNsYbFV113Ew6Cp2uTsuk0uI1io_mhA6-Lmd2h18" + ] + }, + "Body": "" + } + }, + { + "ID": "8ef9f7b6caefd750", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiZ29waGVyIn0K", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3196" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CIXH3cfx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo97hrh9l4pmrJQqaH8Qu3axsdbEPH2Y8GSEso8-ExKuf4ot8o2uS79ROJcgCtql4vLQJ_4L8q0Z7E9E2WYbbl6vKUYKdQiCI0POisS9o_ViNh9-Yk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlciIsIm5hbWUiOiJnb3BoZXIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTM1MjgzNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS4zNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuMzUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5LjM1MloiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyP2dlbmVyYXRpb249MTU1NjgzNTg2OTM1MjgzNyZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nb3BoZXIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vz
c0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ29waGVyLzE1NTY4MzU4NjkzNTI4MzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlci9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InJ0aDkwUT09IiwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ae3a5920bc8a9c3c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3548" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CPzK+8fx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 
GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrPLZGQqU8j04JqxMmfsqas3HGTlrdx5NM99YR5nCedJYGMSaZ1ynvd2UbUcdwps_gfRo__0XapzHuD3hM_bshff65qlnw_i2cOwp97N2EhfUsZ1X4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS/Qk9C+0YTQtdGA0L7QstC4LzE1NTY4MzU4Njk4NDQ4NjAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5Ljg0NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS44NDRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuODQ0WiIsInNpemUiOiI0IiwibWQ1SGFzaCI6ImpYZC9PRjA5L3NpQlhTRDNTV0FtM0E9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjg/Z2VuZXJhdGlvbj0xNTU2ODM1ODY5ODQ0ODYwJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ItCT0L7RhNC10YDQvtCy0LgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTg0NDg2MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ
2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vJUQwJTkzJUQwJUJFJUQxJTg0JUQwJUI1JUQxJTgwJUQwJUJFJUQwJUIyJUQwJUI4L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoi0JPQvtGE0LXRgNC+0LLQuCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0=" + } + }, + { + "ID": "0104a746dbe20580", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "COKVlMjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqY12miALkWDvx8PHEyL23oxWZlw8Jq7Cn6kS8E8_Tn1tTWWoTuS_pWb5pi9qevCIvqK6aTK56MzwrMuy9axIjpw5yyMZ42MaWK_YFkBr95OnK5IbM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hLzE1NTY4MzU4NzAyNDc2NTAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hIiwibmFtZSI6ImEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMC4yNDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzAuMjQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMwLjI0N1oiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYT9nZW5lcmF0aW9uPTE1NTY4MzU4NzAyNDc2NTAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2EvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2EvMTU1NjgzNTg3MDI0NzY1MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC91c2VyLWFub3RoZXItdGhpbm
dAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0=" + } + }, + { + "ID": "da19af085be0fb47", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "19484" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "CLmmusjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAJr55dqTDkY_jBKB0vAWyE0owtzB_kEoH91_FKTr9C3bHd8fWUiTrsruL5mgwXg2l8gbPiOXad-A9HIJ3Zt_AeXq_p0m-Q8LrHQqJ6sAKsbzV4Zw" + ] + }, + "Body": 
"{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","name":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835870872377","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:30.872Z","updated":"2019-05-02T22:24:30.872Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:30.872Z","size":"4","md5Hash":"jXd/OF09/siBXSD3SWAm3A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?generation=1556835870872377&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLmmusjx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"rth90Q==","etag":"CLmmusjx/eECEAE="}" + } + }, + { + "ID": "96f915707a76c50c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2948" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrQIwi81TFCNpDIZUjs_5tSUFqURLnaBF7g2l1sGMuHCTWZeLyr45VWkcc8fUDYnF-P84QixYQbTzJGuJzOOx7mw1qluYDqoORFGPICfvKmNxrZM3A" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdF
RyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "7a562d6e65ef8a7a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "4785" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ups-3Re4_9njFMmkYwND6BuK5cgxs5tKVRwQ2CGkcks-K0aALbethXTYaEb6fLos6w_FSnM8YkPGM3TETCAoyYpVbQPlotXR9CH3DXngjQNv0yv_ZQ" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiVGhlIG1heGltdW0gb2JqZWN0IGxlbmd0aCBpcyAxMDI0IGNoYXJhY3RlcnMsIGJ1dCBnb3QgYSBuYW1lIHdpdGggMTAyNSBjaGFyYWN0ZXJzOiAnJ2FhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhLi4uJyciLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5pZC5uYW1lLCBtZXNzYWdlPVRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnLCB1bm5hbWVkQXJndW1lbnRzPVthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1UaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJywgcmVhc29uPWludmFsaWQsIHJwY0NvZGU9NDAwfSBUaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJ1xuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5FcnJvckNvbGxlY3Rvci50b0ZhdWx0KEVycm9yQ29sbGVjdG9yLmphdmE6NTQpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5RXJyb3JDb252ZXJ0ZXIudG9GYXVsdChSb3N5RXJyb3JDb252ZXJ0ZXIuamF2YTo2Nylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbGVyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjI1OSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbG
VyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjIzOSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnRocmVhZC5UaHJlYWRUcmFja2VycyRUaHJlYWRUcmFja2luZ1J1bm5hYmxlLnJ1bihUaHJlYWRUcmFja2Vycy5qYXZhOjEyNilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW5JbkNvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6NDUzKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuc2VydmVyLkNvbW1vbk1vZHVsZSRDb250ZXh0Q2FycnlpbmdFeGVjdXRvclNlcnZpY2UkMS5ydW5JbkNvbnRleHQoQ29tbW9uTW9kdWxlLmphdmE6ODAyKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlJDEucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ2MClcblx0YXQgaW8uZ3JwYy5Db250ZXh0LnJ1bihDb250ZXh0LmphdmE6NTY1KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuQ3VycmVudENvbnRleHQucnVuSW5Db250ZXh0KEN1cnJlbnRDb250ZXh0LmphdmE6MjA0KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0Tm9VbnJlZihUcmFjZUNvbnRleHQuamF2YTozMTkpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6MzExKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NTcpXG5cdGF0IGNvbS5nb29nbGUuZ3NlLmludGVybmFsLkRpc3BhdGNoUXVldWVJbXBsJFdvcmtlclRocmVhZC5ydW4oRGlzcGF0Y2hRdWV1ZUltcGwuamF2YTo0MDMpXG4ifV0sImNvZGUiOjQwMCwibWVzc2FnZSI6IlRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnIn19" + } + }, + { + "ID": "239b642e1919a84a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoibmV3XG5saW5lcyJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3270" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urk-XOPoshQkyIHYZp241R2W5lOuLwqPMx606rubzPOaGggM2z_sZO93dYCJHffXuPDOjy64lIxrBXIYW4QT04eM932jPtzPdVd-PFWssV9RYk83JE" + ] + }, + "Body": "eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiRGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJyIsImRlYnVnSW5mbyI6ImNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkZhdWx0OiBJbW11dGFibGVFcnJvckRlZmluaXRpb257YmFzZT1JTlZBTElEX1ZBTFVFLCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLklOVkFMSURfVkFMVUUsIGNyZWF0ZWRCeUJhY2tlbmQ9dHJ1ZSwgZGVidWdNZXNzYWdlPW51bGwsIGVycm9yUHJvdG9Db2RlPUlOVkFMSURfVkFMVUUsIGVycm9yUHJvdG9Eb21haW49Z2RhdGEuQ29yZUVycm9yRG9tYWluLCBmaWx0ZXJlZE1lc3NhZ2U9bnVsbCwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9RGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJywgdW5uYW1lZEFyZ3VtZW50cz1bbmV3XG5saW5lc119LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1EaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IERpc2FsbG93ZWQgdW5pY29kZSBjaGFyYWN0ZXJzIHByZXNlbnQgaW4gb2JqZWN0IG5hbWUgJyduZXdcbmxpbmVzJydcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbG
UuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJEaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnIn19" + } + }, + { + "ID": "3cd795cb0b675f98", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpOXjD9T_c_FRByyyA-CjE753kXZefEzIwQdrNG6OewoHOvKC3Bb9LleziQ6-ymVDg-F1S1eeQeFNJH7nJxDsdA_VmxOjeBzTDB_F3--_1NtwMF6Do" + ] + }, + "Body": "" + } + }, + { + "ID": "361d4541e2cb460a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/a?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpomJFlpfOIJoFvdCmQgD2dRXSREzV1ToI2ntcHi8I9tSyzMCUk0ZrytllkwR_nU8WFZ0YuXVl0Kwsq1EhJL85RlBgCkBNc1P-yVMyNbdt5YrooKok" + ] + }, + "Body": "" + } + }, + { + "ID": "a4e929446e49411d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/%D0%93%D0%BE%D1%84%D0%B5%D1%80%D0%BE%D0%B2%D0%B8?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqmdInqhuXhUxVjtfRvXvHd6Rmo1ODUbeNFFTBW4diuT7HDjzmYnvd4TU6oJPqbck52yFAR99fMx4Tks1TllSUCB4GsMVbBNENfZU6te-hXIB8XyJs" + ] + }, + "Body": "" + } + }, + { + "ID": "89b2ef62a071f7b9", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gopher?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 
GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRgapcSgK1GOswXS37Vynb9BRj0go8bNPOBtrFZ3TDr-nzIdlvKBQuwHtXl4Tlb-mLl-A65zL5Iw3unCEuy6lwtWl5-V7REtcIVHkrWLK5AoLoHqA" + ] + }, + "Body": "" + } + }, + { + "ID": "ca91b2e4ee4555cb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovWW5HSrWbcvQoWGU3c_n64wwcau06tjEM-fzdADFSbmmylG3VLjae6MRNIM4B9gDitCKfjo1_xPQNAZEmMYzRplVmdI4cIrBC8ursFSUywoCSJeU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "eb678345630fe080", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrtPOErXGrnlQtXCV1PhgO5nz80m75Fzs7DoR-y8Ava3jvL9NDJ4L-i8SIV_tktR8VqCvv9uh-X1ltJCj3isa_oY8TlV-OsZeIiESuDrRTYueQWQP0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "d76ba152a62e7dc1", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk5XoKCyt74JBpeU8oREfzh3e0LLtXupBjcWtsFfHNZDEn4DjcmzWo6resyQ1gEisBp_STlU4hVaU3MbTyX3LM443_Gsu4ouZGdf3ZTvTzsLD_zNw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "f785ed7f0490bb6d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrCqpaad9mhxk7VZgTc8xIunote5-AHPp3vYkUbSU8uiVcq65rcnrbtIuCqJ63JTuVJvKw7pFwnLjyn5fL37JzfXLhmLAowAV67_Qqtxxhiy4WQars" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "096d156b863922d6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm5hbWUiOiJjb250ZW50In0K", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoivaSzjVdOm2KP4nXCF8VQDNHXmlNjm9rZwTaWzjvBmH3oK01QYTnHqZ5DhYRewu_1H0KoQgFpUIC-M4OKZoOhRHrq0-H8HLT__SqD1D5cmezrqHo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "0e96d304e9558094", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc-8JwtQprLtec5Ft6q3z4p0ooDaCCLmO9oVgrOXvBpoFhApztj8anfFwEcHdzpB6FBlUFqou0viF_HLHJvHolsxeCCoDOhqyIggOOlWjnSRg8XzY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "b4c8dedc10a69e07", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6ImltYWdlL2pwZWciLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpC3UBVkNENjfRoaL2M1fPRh-iTDTwCsgF8O3TOX_iBuXcDxMjOoTmrIhD4cE5g1s7PObP66TZB1lecFypnLz8hL-aowsOxUMuL0KXJvrgoGxKHUFg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "c45640ec17230f64", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrvqLsOaMe2H6Xh3MyDCOuHmLzU1fn_Zh6kHLkpTpn4EWj3nFkeROu1g-cABD151cTh8YmY798iEku-Q3TnZIMZ5mexmHiCJKdUhAolbSd9b_apMK8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "752ab630b062d61e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY3VzdG9tZXItZW5jcnlwdGlvbiJ9Cg==", + "dG9wIHNlY3JldC4=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaWw6jJMKNOoCNMXwJNVVsxqzPhGfMo-5g3krJ7A8_PSGTx4W2OakfmtDvzOpg1hexLJcsWunNHfyWVXsjBpM58dtQlM6VucQW4Uxndlb9RIp0UhM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "3f4d8fd0e94ca6de", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3425" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoO-NbHUptMgzQPx9zApWGCeqMgfkk2mt6PJl1iWbq8ocUYq_0ud8gPuj9bcBTBOBdqewKzSouddziC-BFLoOdQJ6ElkN136q5y39ZRDN4zq6UWLUs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW
5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "0355971107342253", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryZM2k28xNlFIDnXCH6SJ8RA2lDVNIx-5_eYRExB4-D1-dg_3fWvU3AREwoow4DvlYN4eyzA1AthWs5y0tYZEzvamBoZufkrXPmD64aT8kzAatns8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "8f8c7cba5307c918", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3448" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upnljw7UWMuKFeJUJp_gu-lyfzDRVTwgInddze_9zF5waX4glKKpcRrGpe50cvS8dV_xbbKLQa-CqAMVxlEp7qIHsmTdUR1amjrq0w9U_qrn4gYwSw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuOTIwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLz
E1NTY4MzU4NzUwNTg2ODAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fc766ddbbfcd9d8a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3505" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlTuEeQHkdcuHKu_aWtuYjopXaORKyG9TF597i1ybzIPgJHE_oiKH-xYwG_D2txhF_Sv6Jzfy44Dfga1bINCpPLseDEeD_Ez4rvxx8baTX1uJ7lJo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzYuMjI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNj
MzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJyME5Hcmc9PSIsImV0YWciOiJDUGpudWNyeC9lRUNFQU09IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fb1e502029272329", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urm5pF1ghApp-RiVg_TAipd-LdTbF-hcDPcVRYbmj7KUIQxnsMUF5WOcrMA1t7ZltdvPZhyfsELISuH6DGJlUUedaCNH-B5j580GjBYLUKCwmAADeM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "c151ba4fab0c6499", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + 
"text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "\"-CPjnucrx/eECEAM=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:35 GMT" + ], + "X-Goog-Generation": [ + "1556835875058680" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "3" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur68mIGQ8VAtTRZs4F6ixZZYKcqe1oTqWHWqmp7gNF-81XKf2xX6eS4PcegbKx7bYnb4i6dzTCoLlbaJRNHwwLVzNbYMkGNy_bTHbWYTgtlBSKVtWs" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "e09c6326cc60ad82", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqldLQvGKuu2fihIdwh7CvU7W0yE3vUXu8jqefTPSMhlGgvR1EfeYFqjk_Zxj7fEpv7AjugvHnn1DmkJxJWhrZyM8b5gS4uJID92D_018h2YHH1r30" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "6c83b36cc6fc482c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Expires": [ + 
"Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGOUzu40Jq-_GztkQxoFN64MHcSoZjph7ivV8tqrp6ENxFIB0c82xqEdKKfcpqbJ2YXtXQgwflZgX-uiVOgGY_1c8SLoV7geQMfErTz1Q0NDM4YDM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NzEwODYxNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNy4xMDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzcuMTA4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM3LjEwOFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzcxMDg2MTQmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzcxMDg2MTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0c
HM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "e499f3b7077fb222", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Etag": [ + "\"c7058d15ad157573e6940c2b95c00972\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:37 GMT" + ], + "X-Goog-Generation": [ + "1556835877108614" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfEf1WUM8E5dSmOe1sOfgheagnlrcFUolvMMBiP6jDvPqoS7Sx0IwklM-DwnuAbBJPQ_EHhX-42njQu9vUjXqamH1U6np0Vmi0BngqFYOTc9MVP3M" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "9e67dfa901c4e619", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYPZ-j39Sr1Whn6F3PYeVaxqIIjS_fBgSNkBO7f7B8RMWih6iCCLf1cPDXB3Br-0XBkdm6i9N9fGeK84_laVYWuJc0uJwSCXydk0bSWezw0qec5yE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "670f74ac2a0b734e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, 
must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWwxnT2DHS1ymBhZJmnwxQiajAVYbOH62dyhX86syLNW_TlPnzYaM_7kACpN8ar5EeAHQ9fsMSHP4CqMcb6ZWg5KY3OkbPvfitdLalAA9MXxMFqoo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODE4MzI1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC4xODJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguMTgyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4LjE4MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzgxODMyNTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTlBDK012eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIi
wiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzgxODMyNTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTlBDK012eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkZuQnZmUTFkRHN5UzhrSEQrYUI2SEhJZ2xEb1E1SW03V1lEbTNYWVRHclE9In19fQ==" + } + }, + { + "ID": "9c6e7ad2d8e2c68a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoSrQtDkljyQ-aUJoC8m6nY20wZiHLTwh4znEZ5hyKxLQUrZIi7PnU416twfTwfPXWvd4cPGZC3NVdpJWg332KnUKSEdFPY2BQwLJ-_9S5eVdwMrf8" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "b5fbec9f26148f95", + "Request": { + 
"Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Etag": [ + "\"-CNPC+Mvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:38 GMT" + ], + "X-Goog-Generation": [ + "1556835878183251" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTr8Qws5nL-el_ied6M7FYtryiLpFfhGNN-TxakIl1FCAOBJwMeq6_KBQ0l4UmT8VePQQ9h_S8r55NolBB1Ex12-iKvooCkTsNmvQ4rE4hOqQGbLc" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "616de25561ebbffa", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" 
+ ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyD8XYw6tgOBbcxdUIVAw_vXrrryw0bgh-L-jRW0hyJt0lCHU9K26isn6tykgml7UgX52dgw65xD_ht4yxbHkY_VFl6MkEY6H-mWRx0_52xj49RW8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODk5ODUxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC45OThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguOTk4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4Ljk5OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4Nzg5OTg1MTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4Nzg5OTg1MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24
tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkgrTG1uWGhSb2VJNlRNVzVic1Y2SHlVazZweUdjMklNYnFZYkFYQmNwczA9In19fQ==" + } + }, + { + "ID": "2ddf1402c6efc3a8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13334" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrFitgTFIY6yt8kk3zprPGtWncWc7Ad6oUFVXVdE0O3QqK0qlYGB7RSw-dwN1AvMk4Sndi8gR22cF8qs88ZlmdAxB-1zYdUNqwArmpP2xni66xX-L4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object 
(go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "eea75f5f6c3c7e89", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "911" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Etag": [ + "CPDp3szx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uotyvz71ZfU1J_hl3NsDz9ldw92nWlIp9muxpBUsdv0nTyfJEqz-yQFyEwE2UM11wv7tkpLQfffGlKhlx7CVK0S1UYJnS2XQu2P0Uiz5bbamHxE520" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTMvMTU1NjgzNTg3OTg1OTQ0MCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMyIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3OTg1OTQ0MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOS44NTlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzkuODU5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM5Ljg1OVoiLCJzaXplIjoiMjIiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0zP2dlbmVyYXRpb249MTU1NjgzNTg3OTg1OTQ0MCZhbHQ9bWVkaWEiLCJjcmMzMmMiOiI1ajF5cGc9PSIsImNvbXBvbmVudENvdW50IjoyLCJldGFnIjoiQ1BEcDNzengvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "b19fb82ecf450b51", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcj3TJ6B5h4Q0DT7G_ioI_Icu9iv65m57OlRH6h9mKD9zZZl320H3QD6A7fNd1UPBSGGDZ9I0TG4_lSFa8JgkJQEVRGVRvVQv36b7ArCuGmg0loV0" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "1f0a12bd00a88965", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "22" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Etag": [ + "\"-CPDp3szx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "2" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:39 GMT" + ], + "X-Goog-Generation": [ + "1556835879859440" + ], + "X-Goog-Hash": [ + "crc32c=5j1ypg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "22" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7DoEkjdrkGGrYpsCDbd2Y6c_Mu8sEA7cN_k06l4WjY25UJsijDFWSlSIA9SCi-ouLuI3if4Rh33a5n5vKcN3oSYjzK92eSPVCczEpQdILKFWyQh4" + ] + }, + "Body": "dG9wIHNlY3JldC50b3Agc2VjcmV0Lg==" + } + }, + { + "ID": "f649a81672a92823", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrfhqrYGhW8ew5dxbfg_DcMhaJh6k2oY_JqMwPRSDtS3Ef5kljeo8oTONrxRIAP8I0ScqxFCy7okNejkqOeO4Vr1TBZ2mc4MDjwiJA52ERSgZMUyvM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MDcxNzUwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MC43MTdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDAuNzE3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQwLjcxN1oiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4ODA3MTc1MDYmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4ODA3MTc1MDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5Nj
AxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "61763fd57e906039", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "129" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiJ9XX0K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13444" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7TRBQyfrSgWnsPL0CVfQCd5s_QJN9pRUHB1YttIoObh1YkzCEtuRxrwyKYtccpyodUCMmTVRyD6d0oDy-c2_Q7GK-_6OC5h_EF_1e9-t6E8VZCQ8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceNotEncryptedWithCustomerEncryptionKey","message":"The target object is not encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.encryptionKey, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=entity.encryptionKey, message=The target object is not encrypted by a customer-supplied encryption key., reason=resourceNotEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is not encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is not encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "b45cb452c78d7b50", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq2YnotZtU1JMnJw10vtgVDeukiWK_4DXx0QFWA91CaCYLPXLDKzCzY8xqb6EGkVxvw731As27REu_hYOqZTwSfJJ6SeHemliLV9JgQYjfngxL0HFA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOC44ODJaIiwibWV0YWdlbmVyYXRpb24iOiIxMCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBbz0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FvPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQW89In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQW89In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7Im5ldyI6Im5ldyIsImwxIjoidjIifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJld
GFnIjoiQ0FvPSJ9" + } + }, + { + "ID": "6831287fd0edbcd4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYyJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrXGhYaqbrodjXeCKtZGAt0hM0GrM8GBcZBkGkLJ7550-Iqaxp681SkhDDDOp0V3-xMmoq3rjWOC8A4XvJfAzgxGsO2VekCUJgUBMalL8hEAur49q8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDA
zNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "7588cdc84e3976f4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGBHQWnSHLUYlW5nck2w_3QN4x5uYmOM-GIatNzD1tqhzK0JOf7rBe1BKILpdyEUPlSmrLwrukcqJwHgfezE50OQL7ha9n6_fc6qUMoVwkEGawOgA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "974ed1b42608e806", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "34" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCtuc_GsAj6sf3zXpqT2_KZrdL0SEwsunv07k0DaaEHMj8gkX4kRCcm5r8AUrD9RNYjoKeceiCyY4pDN8nrHljshmEIXF2P29Oyd0KSuqDjsNeps" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMyIsIm9iamVjdFNpemUiOiIzIiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgyNzYwNjA3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYyIsIm5hbWUiOiJwb3NjIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDIuNzYwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQyLjc2MFoiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Mi43NjBaIiwic2l6ZSI6IjMiLCJtZDVIYXNoIjoickwwWTIwekMrRnp0NzJWUHpNU2syQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2M/Z2VuZXJhdGlvbj0xNTU2ODM1ODgyNzYwNjA3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODI3NjA2MDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "a2be30bf1e3cb539", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYzIiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==", + "eHh4" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CILrp87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqziYHpTkcbyMFRtHko_e04Rze8FHLbALw476U2bB9k_SqPwILBzxKRLzFvRN4q3RLt4xR1mpR2lgJ1Zq0BNYCefLOZsOeq4JbzJYLxGL5V799xkQ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIiLCJuYW1lIjoicG9zYzIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My4xNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuMTUyWiIsInN0b3JhZ2VDbGFzcyI6Ik1VTFRJX1JFR0lPTkFMIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjE1MloiLCJzaXplIjoiMyIsIm1kNUhhc2giOiI5V0dxOXU4TDhVMUNDTHRHcE15enJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzI/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzMTUyNzcwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS
9vL3Bvc2MyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMTdxQUJRPT0iLCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "d570e13411ade628", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMifQo=", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3336" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CPCDx87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrltsLp6xgl5GQBA_ZoqyMKcRlP-MvWyo0epRXUAbOAkOUUpAOgezp4fFQRP5wEM1fq771AWUgtYvQ3HLXCnwxmDaCqJxEhICiYhxDp8RpvCnn3xuI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My42NjNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuNjYzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjY2M1oiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnM/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzNjYzODU2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRJbkNvcHlBdHRycy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NT
YvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "284a06aa5d3f4c0f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2972" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqEuzT5vUHzBcQTT84B5lbxQRZ8Wazbvf7pARGdim0OKOWWM6MdR9KrH11f9w9bxrybc2YHzoveHnAwpEgFzwUcXlLN8xDttCt9pwOnPMX3My8utXg" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkuZGVzdGluYXRpb25fcmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LmRlc3RpbmF0aW9uX3Jlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW
5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "631a4dc25c1479df", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBK3c9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CIes587x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up5pzFgsk-trM6xfNGHCutAafrBhKStla4toQDjvEsTPe4TTesYnwc0KLg9WK95RXOKqdm_KUngd4hv6Tucfns_MALlJyx1s6A2cZR3vco6jKPTW00" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC4xOTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuMTkyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjE5MloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0MTkzMjg3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "e60d42eeeafea205", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBL0E9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3301" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ9QOOcNdGntWr097PRNnaDFbt2xarczbyHRwkwxSiwRZIhV26iZGbh5vHIpvl3Z5Dxc7hjtWuutHTMn8jpCTEowLJNAre2A6mZxVNtV52Vh6XDX4" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W2NIK0EvQT09XX0sIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHJlYXNvbj1pbnZhbGlkLCBycGNDb2RlPTQwMH0gUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi5cblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG
5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJQcm92aWRlZCBDUkMzMkMgXCJjSCtBL0E9PVwiIGRvZXNuJ3QgbWF0Y2ggY2FsY3VsYXRlZCBDUkMzMkMgXCJjSCtBK3c9PVwiLiJ9fQ==" + } + }, + { + "ID": "5a293e6162b30ef3", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiaGFzaGVzT25VcGxvYWQtMSJ9Cg==", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CJuDg8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgpXYBQOYHYXTQAVb7IGna0BRqHaB7oBdZ19frcgAVGErLbEHe3bESKZ7zKSO6r0AtvqwfCEuJty95EeD5MkwqIsXbr88YcTG0j7M-g4yTKkejukI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjY0NloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0NjQ2ODExJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "19f19648f1196c10", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CKDem8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAmKuvPxUsjTHDB5nMuNG92dU0gql0Yol2zlvPDWEDuXHQRLhOJzYBAH207Ot-_IB22pH5QSfTaTayqEWBUfkobCR9YGVa79W0LG4fv9fnAjxk5Cs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NS4wNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDUuMDUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ1LjA1MVoiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg1MDUxNjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "880416f9891fc25f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3515" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFC5_DRlr9xktR_eWpfuFqiZRbenVOATtUEg3tfJC8JbCqagKQzn0_sYLvxvG52ordbl2Nwo0L6Y0AbwuenLAo1uoX4uzHnro-JiY8MkdQEsO3uSA" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5tZDVfaGFzaF9iYXNlNjQsIG1lc3NhZ2U9UHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W292WmpHbGNYUEppR09BZktGYkpsMVE9PV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UubWQ1X2hhc2hfYmFzZTY0LCBtZXNzYWdlPVByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IFByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci
5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4ifX0=" + } + }, + { + "ID": "e1a7bef7c2f0f7b9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "341" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDioJ0fktjtyI_mABE4-oH9KqDlurM5mWiYDhoDp_LRJYSPvDxjuoaYzNhjoTHdHPALySTzCxScTstm27R-praR27EAm4Gx9k3wHAAwKJUptqRnFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfV0sImV0YWciOiJDQW89In0=" + } + }, + { + "ID": "df5371a2b0cd8c3b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "317" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJiaW5kaW5ncyI6W3sibWVtYmVycyI6WyJwcm9qZWN0RWRpdG9yOmRla2xlcmstc2FuZGJveCIsInByb2plY3RPd25lcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0T3duZXIifSx7Im1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIn0seyJtZW1iZXJzIjpbInByb2plY3RWaWV3ZXI6ZGVrbGVyay1zYW5kYm94Il0sInJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciJ9XSwiZXRhZyI6IkNBbz0ifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAVgetrpv1xjtKv2B7KFBdnbg20V-btiRn3sD4kQF7shWIQ1-FqJMRsbYfcYJyTMQHv_BhW_eVzvcc56z0LOcYfE-wbCRZpqYjen6c-bVcMzPet2A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "feaa4b1a4dda0450", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwMVZVQQ8s-ZBUJDLKyC-qMMTANncoBhRQWlMCITVUXQrTnFGGxu4GdKyVcuudrSa-DWy5w5iLE-i4a407GopUNma3xIq1eNiyVAziPD7jQ4YluzA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "23d94c8a09c0c334", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam/testPermissions?alt=json\u0026permissions=storage.buckets.get\u0026permissions=storage.buckets.delete\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "108" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoR-XKWu9nOWmFZve2-NeDyDvZDo5rYpPD3avVEmZkZ-lODvHCSR0D7zdPeCa61L5EtOIYJBqmC0D9Xc229A1GDS5d91vgAGuzRoxRnNa9DjBw68xM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSN0ZXN0SWFtUGVybWlzc2lvbnNSZXNwb25zZSIsInBlcm1pc3Npb25zIjpbInN0b3JhZ2UuYnVja2V0cy5nZXQiLCJzdG9yYWdlLmJ1Y2tldHMuZGVsZXRlIl19" + } + }, + { + "ID": "3d0a13fe961ed15c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "518" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIRjhruHo5rweyX35Zt-Mzm5UDD5vr4319gSNjtnJHD2RWtVLvQdjuZ0jv-XKT3s1xcJO7CvoU-qAehCknukI5ssvv2LgwazyhnkcuFws3isqM9uo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ni44MDNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImJpbGxpbmciOnsicmVxdWVzdGVyUGF5cyI6dHJ1ZX0sImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "c1f385994f4db8e3", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/user-integration%40gcloud-golang-firestore-tests.iam.gserviceaccount.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "159" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "589" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozEPu-BKQnmBUxTwsOWB7mmIPC5lppmUt6lA150OiXgQttSVRetVysUp299p7PbjI08qchUQl2idgMbfBCScDL5SuoHi_u9ani0DXYX2OV9QXAovQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "ccb3141a79c55333", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoG9XpooOFPlHtC8fg7llzYYETQUMEm0PvG4TbxBF5KdfCdB_sxNi0Yy9KrAmMDPBVyq0Iuaq7mzQOhQe9lXe5PBOc8-CGTXZiEMkMMJe3OYxjExIM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlv
bkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "2d4244737318f2d5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn_UdEXhLLXE1QsYjwtyqQlzX3nDvpAO1WNpRkHhyV39RJKkxMBrcd8f6Xo3VZFDps7iKuQHnW49yRqjh42062lkisOH7otDXrdAKAih4Iz9fzERM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcn
MifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b63c3951975a766e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq-LP6kQLHNpjT5jYVkPzNXyfr838sGr4jGMVPq-FOq01ayCWvyWVAsvUbxp0NSdOYJcW-z0i2NkQLuSKGQYrX_3z8LW1Vb4uL2jxfC8kSiuz28hkA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "0764f2598b67c664", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGddSv5GSaZB7qBmb0PigbMbJmWs91JB0W99oOTcbMeUk9QeBf-7QF7W_BpAn3wNxqVD5cBDPgGuKrZCfYv77SgCGxcPW-jrXsUY5h50q8eFL0_3o" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6I
nByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "bdb0ff507e4df251", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8t6bOWiJucB8KpVlIp8WjWLSUKssQaynZNguVy_oBM2ggBePUFnoteMcbw_L9aSHoD4hwi91dMaBC4aVc6VuaMSBSVuCe0L_0IymxhBIx3SRj_As" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "d2e66ae2cc0d11bc", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "COq52NHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqQ_qlqWPPh6t8-gwPlD5zRB96zsmFgzyez4LG3XN4uErivnyHcCeeHje_i95VLJcWYXn7_-knEn0faPBCCAIStf4fXGs_Lz_VzHK6CLBZjU-oBxcU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0NiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDI0MDc0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC4yNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuMjQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjI0MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDI0MDc0NiZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTAyNDA3NDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIj
oiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "89b2bc9232a865a4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "CJTz9tHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrymnpCwS6vZ0kM33ZD4nBupWs9hAuaOGpwiNcS4GJctgaLPe9CQ0Ada06yQV1aEfh9FtX6lgYh9jHwbsT6qdgOYOYe9yGDdAF9f91aE4y4VUHQHts" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDczOTYwNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC43MzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuNzM5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjczOVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDczOTYwNCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTA3Mzk2MDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2
VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a70634faba29225b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsIKUUc6WyIHUlDvAIDlXVeOofVG4sVV3Rus8ktfPMTUTI7e4PH4657AoDnNOKKy9TOgt8yUtUvCNvDFQC1xwVApFofRhWT3kcjsEYRgZPCDDsUqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5539336327f2e0ba", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrH7pn9r8O1Zu1Nc2aNUO_CO75MyvOJyONGw27TUkfWRSSmFIWJUZ6F3e3OprVF5jKtAkQ73puZbyNCK6mozyHrmPRRa0mtyuBPhkYg4DzPGkXJOSU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0Iiw
idGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "029ec3d54709e7ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqcpItVCw5LaIWvDckOjfoChIifJGtuURBAmgtpzzna_iVOfDSaQOhxiMDVfYkV3mKG7__z0WTNRdVpa2Zs2IPcIAKwqwSeIMaLtz--MluSCHv9kc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "bc0d0ed808bc8e68", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + 
"X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpljfpVGJewuMZUyJZ8DU7j2k6JmfPqXXgkeeu8vGdK5j-C_DUgpubS4nFEp1z8EsN-fUdceCxfiy-FGIJiFpME38hWnztW_WIn6zeSh6LCbwIK9oM" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "b695481e00d2d6d9", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "deklerk-sandbox" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmYf7b7ig2ase_O8qHEsgSAXifAlxVdDy_Zh5Qaqx1IlL1wkTvy1BSblI6ZDz3M2X-Y6KrdJp1IaP6nDU1F_Yhyt84Y7dOG80r2g-x4E7NAyyjV0o" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "2fe3b6cc96ffa451", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "266" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-S2G_BUo7MzXUj-7vUDuVFWfBIA5GJYfTdaeruyelzRGFimnhov8wRB_ozWC2cGPQ-a2xQk8bw_cVV_D2Q7c7rdR1ujaTM3oIjr8vjsJBZXa1VeA" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RNaXNzaW5nPC9Db2RlPjxNZXNzYWdlPkJ1Y2tldCBpcyBhIHJlcXVlc3RlciBwYXlzIGJ1Y2tldCBidXQgbm8gdXNlciBwcm9qZWN0IHByb3ZpZGVkLjwvTWVzc2FnZT48RGV0YWlscz5CdWNrZXQgaXMgUmVxdWVzdGVyIFBheXMgYnVja2V0IGJ1dCBubyBiaWxsaW5nIHByb2plY3QgaWQgcHJvdmlkZWQgZm9yIG5vbi1vd25lci48L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "59bdcad91d723d0e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "gcloud-golang-firestore-tests" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3lrNfk5bvup2RRbcsT6CeDACQitaiF3BTMyQ7B23ACtTqwWweq9ooOfZn3CX7tsbzljhlY_009nsTXXQwm7V_xNNd8FZwT44CzSpul-SZIRODC2w" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "7b7b6a79cd2a53e3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "veener-jba" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "342" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpT7Ge_QpcDrqajg6a4kZrmoA55ZSma9IM3pyz0Ow1bu5nLURc0V7cAThbvikg47_O-ox0uMdF6zBbtVtKL7olyMUGd9wqCQ9ubELVoJXiNMDehvXY" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RBY2Nlc3NEZW5pZWQ8L0NvZGU+PE1lc3NhZ2U+UmVxdWVzdGVyIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBwZXJtaXNzaW9ucyBvbiB1c2VyIHByb2plY3QuPC9NZXNzYWdlPjxEZXRhaWxzPmludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBhY2Nlc3MgdG8gcHJvamVjdCA2NDIwODA5MTgxMDEuPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "914bca19df35cbed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM7lBsoxgNb-rh81Lfe0tWFGTkXQYfEB12ofkKZ8SK8kK5bPRUsQMVhm3aYB2N9e5_QSBZA25my-t5LpgR8TVW1lNTGd97OoD7bdBhP_CGo3xMbos" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "164b4ffce0c7e233", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqbRkPvid4Zo9mQsbMXlTlOQ1womp_CnHUJcNWRamG_lp4ABhgVPVOdL8lE11J3tmvVkYhzcEPpIJuA_RTvuAbssXFsNo9Fu14cQ44HwhSRYk7r_dU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "d04d3cb2ac9d6376", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrcH6PdfDpm0th9SnZgYwMYIiYGYz2VvNs-Nb0VYFbZdA4QXfOJiRFg-xKqmDq09HRt7j8OwTDO7UMm2EiqwjPz7j_JPIvfcCzfCnVVoF3XiZFomuQ" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8b3aeb1f423d76c5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoX_zzYo9AYFb44YmIz49k5z8v8ITzEQsDrSxEE50-7AC-ndg18Yo5DFAZ9ZFCDtT5P7kch3c0-YlkhX-5oYCUIm-mmPmoTg0CKWVI0TJ9la9z9-vE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmly
ZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "f32edc9a9263c0a1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok5lyOpE3YUZDwRz_bsA-E-xzWeqaApyadumOOFoA_YmOi_xnvFaJr6gfp9Gaktbpud9mv8qDCIvq1Yu7CyAVY0ScanSfNCke3naR_G7kPKYgrywY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to 
project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "da73739950dbfa82", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdpofRnchjkCQz5LLoghzg-RHoGATH3xbgJVW0LKGBkH025w5EB5RlGmHHnmXuPtK_5Ffyl1bxjmoc_h7_vGvjcxWbHv9-NKe9fLO_oNYRmw-ffvU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTQuNzM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoi
Q09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9" + } + }, + { + "ID": "94e697431c9323b9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqhr0guIL00afW_qpaRvxEGRSJEWTtfrZP2Tv8Vqt2xas1ivvvMHVbSpT12ltU7FjytFhno9SFXTS8pW1S0S8qJdFctUCa83tkcQRXgN1uPUwhX1ws" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuMTMwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNT
AyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9" + } + }, + { + "ID": "3c5d694690a75054", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12375" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UogsU3nh3dfRYWq9YqTmJc7CfCX1U0b-IrWpDCM7M4YwfuUWgzzzcydWdHLrF0UPqRxNFQj3eMHBbDuHYcF0xuUNah4g5J_nt26feHRp-moh7uFXNw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat 
io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "e050063f79f64820", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq4hs_SY8_2HHPHu8NfVIkOdwmZz7h6XH0nWRSTQJSqzQZQGvv1p83Z6W209h3bXsbN9_ESiU3OIyLRwmEldfBP94hRhN5t1JnagcKPcMEOv8FtvHs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuODI0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9
LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9" + } + }, + { + "ID": "f7edef926cdb7d75", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13231" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyQ9DLrBDGkx7uOkrIKrzMMYA4YygdFsd1si16n2Lr4I5Rfp1lxwadC3jThiJbE-cDyuZATHtvM2bzpsRDPJzbmKgtxYydHykiB-oCsSFqfwy50gw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "2745d195a40b86f1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" 
+ ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGeWJRYTNsu57knuRFhwHuoeUMIJhUtuQj4PLhGDoFcITB2Y--7JgaOCVhepJ6a5RRKvU9d3UrAyrtbGNOVHyW2WAbN_KpfkiFhYq8l7HPERpLDjg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5258a70437064146", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqImoOCe_iDYCV8XHpaaEXPn-jeAf0M1MyixGju1tNCSTBIMEKDzh_wxVxapMmXWPnOes7YX0T0Fr663x7eJCl85B0EGXggUgp8bqeDV3cJ-mqeDrY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5cef2c464b7356c8", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + 
"StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpH5br-AVWN2g1nkkM_FJWZIjgOgh2NGLuiyYUS7QD2DSs6B-c0IQHgC2NUyy0eZq4McQhwxBGhUJ05-WeD_bX35i7skN7Jn_q3ELAaDwpi6_x3Z8c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "11cd5140a264e423", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", 
+ "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2_tsxxuZC3Ni-A0zL0E9w8OVne0sWfLi61Cv48eTWj2R4F6G0AmXZU7J8L-WnDJf4s-6GpZV0SyQqrjBA_Mp75pJfWb6cl9_7oimTy8HPnxAtMwc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "dac6629e58d45469", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrTXUzCa6WOvWD6NqoHhkdwRtG0Z4h0QurHOuKU1gyAWzXOt8lc1NVsLqe-6m8siE76k7l8g0EY3Le273_4GFB81gHMhM4qBOOgbwSBgLl4zKHZspU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have 
serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "1c8663192ed534e8", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": 
[ + "AEnB2Uqfc2TzyVBAwGuVpWWQt7OYr3IQEmVj5R7LIr5LSRAhwFernkoD6TFVnJno_ria4LL8P9tSQgzCF7uJpStJTpYgTOiW5FqTxQ-AgbOjFmAMjah3o6U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "51858ae7ea77845a", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqc5V7vaPL-d4aMLJi5QXyxZYWWtGBx0QuvfeB8rFlhwPGPFLQUABxm_XXkR0AOeIqdZ-intSu7I5srWdFKX85aY8r-z4oRTuB-CjGUe5uMiXQzXU4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aW
NlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "8248d1314da9a068", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqrg4CYWnZtCkMXT5KHCcQXY-aSGc6JuuEoVkTtKgObB9qr-dE-5sMKCrbTfpFudhid3ulDpkNuOUmRmBy4k7QQoRxqgues_a8dB85cOybZKZTM864" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8c1628782bcf7eaf", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uqmer5wTHG4zziRJUR_QqUbX_xgytHZkMg2KFntbVz8pn55RFN7idAyYcqz3AhqthLD-bHH2Lggdg4MuIdjHVk_kKHxvDdqI0p814avmZPYAhJdvsE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "6813d64060667ba0", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsvYC9PE_CO5H9MncFfA0uYzQqFBOqu0_zdexcxDTJ22CO_vv1LiRY6ZSC49_FMeUZBA3KdMfOnS7DsSxBpqrLC9ZM6GAs2sFolmZKhp0RWJjvOzs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "76ac0290041d5a26", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" 
+ ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-ejQ-G_PN4CMllngdiEh1SqHQNy8TXBA-htpazVSOadp1oThRsYsQuflX2kwkDQHFpeo1UgDPXqi5BRq8qZUBsM2koJIMOp-APWnB8T_qsTHlY30" + ] + }, + "Body": "" + } + }, + { + "ID": "0de0d173f3f5c2cc", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLOtWcLW6yZk8n72e3qkdl4ft2-s7FJFYyRv_5REkk9Q6d7qk6Pcpcy_HMCJFgKTV5RZzzIU3k5hceknIV1XNGVWK7gOCFimyeb8sFjNZC7y7rypI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "eb2af5bce5b9e1df", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqgaldc1OJvgmfZxcZs4i5d6EXMyFK93ZUJLzSK8Vo-qPYKRwhvPzK8GFv3gqrdyjBbvdc2Uz57nCwy10LhC5AD2pscln8VbaQIOAHqxvkwsqV27Rw" + ] + }, + "Body": 
"{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "ad55ad01147a8c62", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoQpkkPNbb1CGY8lAzUfymkmZ7PNlRK6hK3deBeoMip13ARTb212DIgf4lgUDM8PVQsAqcnKwzNulbZICoGzHwrsBZfyi7p5TGX2yJHLXSAGEY0MBE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "1ef9ea9b9bc398a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur5WrZwygTHM0dDH6L1kypZzaAQtCwxt9wA7VXvgnjwrfwdRnTw0D7K3VLYLRTPDcf8mRFn5CetkcimFxfqUDoWLMF7bH1TLiBr38K6aBU74aRWUd4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5c1cadcffc6c95e0", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo_n__u-lZKgZqveqge9YBw7wQMldCd4t0io7l_L_nlcF4DSPBPvZd9jGPCstE1EYuQS9L-Z3Y53Q15FlqM1IcmRDv6bYEJQWqkpuzCRRa_TLh9P7A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "4a615256ab8c55b4", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpwGIlBhvugAIHxiObS0RPpJ40OY_R3exJfPh5IHLLB3SrdYtGMpD01QLdxc3E0IPGkBiTrwM-Z2X1gMcxuOqJWJUrhV4xTFe_n5G8bBxzievz39F4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "63c0acd95aa571a1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqNu5DFt-lpr-_4YX8LRIhoi3KVRYVgFAlG7WOPsY7IGJ2NLZSF3SQhmkvfL_IKZ0FWIwx1nGT6HBBe9t2RYXNE1kXis-9KgEaAWhrEByTud5QGx5c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "3c6b1be88134f9d9", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": 
[ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqzTVWCTbjlM7VCFWFbOojrtt8ZMY9bRy0eXj0Uxj3gaYalvwqR0WBrtbX0iGQ-aN5pv3-Acc7EWIoHBa37YbMpx9fuoJUpTswmdpiCOH3wbMN7tIk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "60cde48b740e9a63", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrLyiKgPsp1RDgH3Q2SGlQAwTvGJwVDvb9U9FfvTysIFo6sO-5AoXrGPQfZpr80aMOEKojmPksURjhsteWSV9gdPsOlPIJMorxvQMHeomMCh-Bgmuo" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "95b915570e7c2375", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UrOZjNI5VbLPTLuBwhSAdcgxvqgcFpH-ED4XyEgIbnndmtC_zJ0lkLfudymSRpdRE4NBxn1Vp-AmZnb1ONsP2I1GBnLYfpYRbzFQpmihPrgBkDnaB4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "e4170c637b705043", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqXQonYrNQZxcXoDllGz19Ruu-Eq1_3FVjMndISB0h1O1ojodwFr2Xor2nZHBZgv3NN_YOSrunnT_KYTfBFNdqtVvqVC2L19Qhwk-cxse_FqrMVbIM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "a3918deffd2f8f0f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": 
{ + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpCCE2tr_4iWFlwmQkFVvzfskSWvZdvohQYV52oXF-tWUfie6Brp5TYrvUYx7jH8zsTg9qqk_kv0th8pOA1WsHahBUbDYuaqAmW0EpSJI6_xjBPdZ8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "42a941620757dac5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uoqov2civPKG8oSKmpVgUSoOD4W3rev4av_O6jgxkc5tSpPMgtxZeG3NznVRuFJLgClao2zPH8BvZIxTAAPUi_R-H0zgPz6ou8k9SMzLrcgW_HW100" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "ae79f2e22d58681a", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqP_EhJin_8uqVkiSwAv0jBAI8A0o7nSwNAQ-Z_EdAnq2PItIGJZl5NOE_xcDTFce_ncHfsf6LvV4NMvuxiOsl32BOddZj_6x5dC36QqrKvhXKaQJ0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat 
com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: 
integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "99becaf4890b3c21", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": 
[ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpWMu40YNkuE3QcNByTmrKw_cnq4a8DVtcknBJ7gTnLdH1BC6CqoFOStPpa6y4PMmdpLpquozlOzvI4NWjgZYp1d9F_wBEyaq-wquTQAWGXNy3qktY" + ] + }, + "Body": "" + } + }, + { + "ID": "586ccc26d5a22712", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsRO9GedosQ-kg2lCQpx2S6C0-HH592CdB3SU9yGMwEEGKX9md7KGeSDhecrm_6Ug6tyYtEMlsMq08YBAKGprXfcN9130mRolu_HVey3CcrXK1nZ0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "fad4bdab588e0f3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUefKoF0asUvxzmAtX5z5m_5FRy9UfliKkjQN8cYyrHAotGwiQA72QN0lYfA0HXbe7gFRokwBAY8R5OpHcEdRq5K9sw8AwHuNtruFtBMxjN0D3hR4" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "23bb9ce53da2f525", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Upa2KE5JJOXq2FOn7RHasJpOeeHqWU2kk3BC4cQcGQ9jGzh6XIksN6Z6u1NVoCbxW7-3zGiZMdAop-oa_EuyPAauD__JgzNPdngeNeXO7lsGaVI6i8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "46482e47e655836d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Urze42blCAS2Dskbf5jkbCkdo5IhfOjcTlcufV1ooZd5x_QD0zK1gRoXLXfvLjKHEKaJ7cuDeSRmNjuqgEoxJFMmXUoPOIetWLYbH0G-lJcM6tydew" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "856a1fb5ffdb000f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": 
[ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFSnYuPlv_JejZ0nEjoWRXeLwj0MS-ddTlIaSOjaK5BudyIcIOeVDEeyFYWsF-GCZzMC0F7v-UEw0_lbsYBXCMZcLp6WZu_s3OPDREhpz2s3Txw-s" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "a74c3e1b541a9ffc", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ury3MiMZho3xaSHmFgwHNvKIHTa0_gZzfKvX8FW6iVGNkdSbXvmVypIUuIoKXDjd4rOXpFghdoJlES9A_iXUvZyfWAZLvWqR1w6sDRymrIARVd0dPE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "913da4fa24ea5f1f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKAxObyNBS_Mew44t3k1u80cLBGQW_I2ohrF96rIjZRxFscjGFrvsOHfLxw78rTRnpHmfA0uXnNz8T2FPc_Op16Lnn1YkfyOezSEhDBx11qZbinEE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5dbdf24cae75e565", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoJgRZ665WjiUif7SnQ-Vg4B0kJl-UQ_kIk5XgmcQIL5zmCBp3NCxjksybkvNKpGcQnIJSLevfyWd2fugRv5y5vxmEBgqFffIy8ibTIyBDH4b7R74" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "f41892a142fe6d2b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpThInLTHhtepE7J4k-Rg6OoLRyOqnjPz4sZmfIH6wp5TSIyLrZQhExKAP52XbwUwMYp7x-xypSMY5kfR66I-dmpRWaFkfs0Lhy-Z6kqb5JUXSppqk" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat 
com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5eb0710be2571dab", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpbetUCGwYg4rhIR-DP3ntPqbkFwSqutNltVR55I4qedP-rZ5hC7ospMy6hZ4aQaNOhNv0frmst8MQZ0j6Mp8BcTC6qy7mU8bhEAiMGLycR5XxUWfI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzd
C0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "56aaafb0a24b0644", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqQczDm-dzjgTlLFcJ2lXQrMTpQ_8UP-1hNbGj7eZEgZcbxWA-oyXqEe4tJXq3gYdZ3mEIcAzaDtNeBHWidpIev7kdXWank04_JSWbQtU2UomJqeU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJi
dWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "3bbc40d778939612", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5kJWpLwxz76BKPll75-nOsLv_uoh7X-TW0SbNHCYEzkGq8VZ-W8a5tKGUF6rKea4ejr4MhcqJVr1Zu7O-zNAfQWJMsHC9CTkOrPa5Iu1s0KBJDdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "a2868e9e376e356b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoyZfAJB3TroGxzYv0gy7dHUwpU0jtIeU_EyQSFQKzHlHbyRa33r_a-B1aHw5tHaoawuDvv7skisdx10yhmm9ZRrPw0ibwpMkX5Cwhf0OK85DF84jM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzM
jAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "ffb216dc74ccc27e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqBumxgw_FTJlt7J3xLsBDxljAcEyC2G5aQqhEibawMttB-ogPabqDfz8ZN65WtOikIggZQ8OqW8zigV4Q5vfQkog7NMasAuivcMjhAsgqv_6qWv_E" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "183157f40c6c1bcd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrC6xxVQeIIjitNwDv5PhDUIBQbtGdHJDi_AmLs8vTbGzkby-5hguMIwI_UeHlCuGF1u5jBoVwCOgSqJBiJJ5fxh8oCzBn-nHGcGAdIOeQt7gTL0RQ" + ] + }, + "Body": "" + } + }, + { + "ID": "4b6b925053570053", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpiIBwh04pJH1-X9d1oJluF_2ZCJEFLzF4E7SI-easf2noIolxavCbpH7NP9GMmNXnVK_ZJm4kW8coV5ArXDr-Rwodjnd9Bn2hFiJ_puNeWWldJ6mA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was 
not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "9b4f9c448170e89d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHc1VVBdvXFq9aDPAprpXNP5xPt-ZtPdYXwCQPHX_s4gkCMom31AtHeDocV_hker5qryWoYnRD8PPu5c7JqDNQ75v7GgE1vO5xAt7AqWPoLU5MO5A" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "692d41cf9ea91eb2", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpuFB_mdJg9Fd7Sj06Xdcrj7Up2xXVc_P-SgISEEtiv4v9doPXzNbzBz92Q2UfEchYWSQnikHhS34dUAOefj9J4vaBYqjc9RjFmrn8YcPNyS8aWKgI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "db035a3cc51de007", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGb7BqO44vNpZjBnLrSI6_jVCVRFevSqm5eux2DIE2zmUY3sH0T0st7OOsLPcBjvLSbDfijMcdJwsgpM1fQGcerB9bS8Ffd4Qgzdjth0I8FreV4Vs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "e1d95794cb6be989", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVcH2L0r9wGab--fnRuKptLRoUoHCUHfRegh3rKNQExHVsYLnt1NFwOKFa_6hRbeYXO1aY8-F-KiD6IY3sWykIKXr5iqelEJHTWEMQ8wHuzIcWLO8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTUyNjYyMTQiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuMjY1WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1LjI2NVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS4yNjVaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1MjY2MjE0JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTI2NjIxNC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW
0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "297f13e741cceb04", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UroLXQs6945-UOUyN7qOhICUNnGSzCVrNCv5DD9uMyvgD08zpuufBZj8WM8Fxe0DuOuQowqbPEn1UdJdbFTW7puyeY4zX8fM6UgT7SZIVB0Xi-B0OM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTU4NDE5OTciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuODQxWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1Ljg0MVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS44NDFaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1ODQxOTk3JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTg0MTk5Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaW
FtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "9617f04d2dc95a7c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12683" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqeJiJeDLLZgn9vlRyNk1plY-7dcPXaz47gPNAhMqmSx5ZMvg6pqWj18PVUR65UGuWE7Amtua_zqsv0dVyDZE2FFceMeIQcQ2n6noklvRS9kxPXcEU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "2309e069a65c894f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxhvVZmlJMvR93BoI0JsL0TYbDN-kmlYHQOOlRfb-Nnqpc8iUImRtJT8Qvx0XWrOsALmLv4Oub_aeCCWkHPvqaF28oAazlhqRZYoxrPlaQlW0N1Qs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTY2MzcxNjUiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTYuNjM2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE2LjYzNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNi42MzZaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE2NjM3MTY1JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNjYzNzE2NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYw
MTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In19" + } + }, + { + "ID": "d2fe101ea1da654d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqjFigs6aYtqXW_7VucOIgJY2MQZwuLv9B12s2ffhWgWDxtJyjLinG4A8U9gKBCpiTt9jUvdHKtaG20qZfyfN9Q1eVAkUQh_zv7ESbqnWhsiheI7zw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "6620f07aff04b6fd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Etag": [ + "CP/B0t7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpyWMaddvNz0IWnQ6KIl-tv6fJZCvrqTlGuaoHvKqXhxb3O1EKUIEirKSVcaedFZyxtjLhPD7Nj6qf1CRGWG1VnVigo5Wf3sYFDuuLvctxVpoHT0rY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTc0MDY0NjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNzQwNjQ2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNy40MDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTcuNDA2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE3LjQwNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTc0MDY0NjMmYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNQL0IwdDd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7f88847ad9c49799", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Etag": [ + "CKav+N7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urtn69BB0ytbCZpuCbus4w__tiLeBpqeNzD2gKl5KN2LgP9ZJOIjudGKpDOJRhyW_WQjVOCJBC1CSB_AgE0MdPzVDAKWt13VfQ8aKe3pMWh4y1kOlI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTgwMjY2NjIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODAyNjY2MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC4wMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguMDI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4LjAyNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTgwMjY2NjImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLYXYrTjd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a21f3ddca57ed424", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12267" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrE9jrLJ0W1PUHWT3ldYhVsL9nNhAeFPU2K8g2rCMimty07QlN95esxj7dBAySQ-nzvK17L3WA7vjmxjCpeQADa2bztj6ke8NZjb8fuWkQHsnnzKXA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "08fda3fa173e6e11", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CKy2sd/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqfGXVJ2-MQaZWJ-JS6runpSycA3GDKIXYHDGf3VBOXnADbUzL2YhZVmlQcW_95jXY0eMkK4Z5tSRk3cxGP_84T_1t3YDYQS2p37ZouBk7GfX-4xxw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTg5NjE0NTIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODk2MTQ1MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4Ljk2MVoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTg5NjE0NTImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLeTJzZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "0c9e19bd4ec37de2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13123" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGREJ2AZDcrZiJzPX5ltwlB28vRYhZ_xv4LMIO3La2IgFWk4wd5IVrqBs3z2epgtg7wkPpZm4qPH9zTzSIsXNxdLLHcNMMYr2azidefxtJVgXqgDY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "84daee26f7d989ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CLmF4d/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": 
[ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoLuw-z5wwTKy-X92_i7pyTrimolUNRUGrElhep6rUH_mmpgu_Vxp-1ncr8x8XNXHLs8NFRW5S7WVUsaDRRTrC5KBPea4EoFLANK8MFY0FpTkXr7_g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxOTc0MTYyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOS43NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTkuNzQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE5Ljc0MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkxOTc0MTYyNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MTk3NDE2MjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjd
EFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "8457a5701f4ced08", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKF1M6PpAT354ON3809wMCor-B9KITRuEV34MQczP3OD4Co0sJYtNSub2FUzA8qanTDrtANRK0RuxPTz314o3HQro2HLOoyKZBow5CZ4sfsunKHv0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c2ba8a7670e2218", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Etag": [ + "CP+SgODx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoq9hgjjwBbSH1zXN2UlNtWO31Qf5uk4ijjGCQlETB9_5H_CouvU3q4tyHAKEbIBlaW1_mNk3Fsp6LgXqsPPKvFzs3-WHeADAgc2-GpD-0inoYUHMs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDI1MTI2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC4yNTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuMjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjI1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDI1MTI2MyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjAyNTEyNjMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5
kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7e6d2f97bd881584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohQH2nTuRg_FtGhlfr9zC26C6lKFA-aZTf4GbiV5kWZCJ9E4YLHfIUKLpCLIgEfX9TAvi4oPNWfEBpbY-orlescLxrxYcB1U5fLmzzHQR2R0teR-s" + ] + }, + "Body": "" + } + }, + { + "ID": "48e189abff98ea93", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CM3Aq+Dx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq_NscG9s3iq-8NZq8vQhzdc9FJMaAfq_0GborrX84b8gG2zpbUNtEVnrjFjLyOXqBeJwthPgPUPcsGbIcD597LaPlxt-VNaetM2BLtMtYry8pPeQ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDk2MTYxMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjk2MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDk2MTYxMyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjA5NjE2MTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nb
GVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "02d6922c26c06cbd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmjSJHhErkbzi8feJ_-8XOMqsIzGrEe6frQzWXQ7dPl0D8MW1mMHClKdwb2zBOTvpx7mXQWy2STZYSqx2o7O1Tr2DB1RZfwCZ3xp1jgw3NCICi7Xw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "3fee93fca0ef933c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CLfH1eDx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLO91a0iSaTsK7EA2iN621UX-atvJ-8m8BeqeR8_o_CQLB8Pco5FMXznwQY7DZ8u0GPd1BvLE98s5S7T1Z6yL66OKL0CFFUntLBqNAKlji2Dcme6g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMTY1MDYxNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMS42NTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjEuNjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIxLjY1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMTY1MDYxNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjE2NTA2MTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA
1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "5c26cf93373a296a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urx3fvI3sQc5JiIazK1OQw5W12poVdjRyBKxIBM6N98jEJli9F0O33KVHHcHaP-QybHub31QmLNngZpzcVKJq33DSaVMW2Vpi5BUaVAAUL2jMoyOxs" + ] + }, + "Body": "" + } + }, + { + "ID": "f8b5020ef71e4047", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Etag": [ + "CIungeHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ 
+ "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8QEz45d-uPa7qZ6Nl_SMqw2iepQXoM4aAkTx4YCqArXnSKI1UO3ZhJ5-_PDg3Uq6PPa2JDBTbxckiZCRMQvf0Cv6E_8Cwk3jKXGkneWFJYSeMAts" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMjM2NzM3MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMi4zNjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjIuMzY2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIyLjM2NloiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMjM2NzM3MSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjIzNjczNzEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bn
QuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7cfadd8c1e7c0bd8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpjVnPaklKmbO8qV2lapwEIAwPjvf4HSJs3kNHHO_xZH4bV65pVPLKCb2qjMPCYSwEKRZzH6NpYjm6WlxsBdJnfBvpyRpIR017q9iJDyBcVPNUoDn8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "b1381b9995cf35f5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrD1hmfKK3yAwFXKqIwvCsEVMb_e7MWysJc7zbyVjZ_sbD9P41lzc0gYSef9yl8CRa6B0RtPq14V5MBSdy6C0i_cAkgANDN1da3O5QKELvACU11moU" + ] + }, + "Body": "" + } + }, + { + "ID": "5e06ccad06bbd110", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2FRcf4QGZH2KhkP3kATb9eOy4S9lgrA3NQXYqhc1KBZDbcZN5NHF4MB6WiTxOraNE0HhkW3SOg8sp1gS4kAcWRZ2G0EGWmLgL6RUcwr3a2O-WY10" + ] + }, + "Body": "" + } + }, + { + "ID": "b637ccaa36faa55b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGzSAdUK8pPs53JjaSJVQJObMJRsfQ5FIpSL0D4tNfft68cfE8Xb5fjSsAQG_PfmKZyX1Ind-s6dW6FgwREeHzu_OnEeOwCOaT8EZi2N4vNeWLvac" + ] + }, + "Body": "" + } + }, + { + "ID": "15055c361faad5c1", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwmXVRTmXX3P5iId4-EjuA1oviIQD-Vbdr7M5rgd7d5IxNvmPVkZz_zFyMBNoHd_DzXnPMN5R7grAe1yDdOzbyafIBwRcFsLh4r0a7ghp4LkNSLZk" + ] + }, + "Body": "" + } + }, + { + "ID": "307f8a44d4ce6e9f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaHmcEJBKGI7CtW9fK8-j5Z-oRk_dTwMS40wVaoNH1PN7k6x6d_9aD3w3ce6WGZJF5OYxkdoifp6L2vVz5lIGjzcmmIZ4Wxp1vktBOv8hLa417Mp4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "f8df88961018be68", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "121" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "297" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqWiAA6LvnU4rExe2rVTwwFd51SI93o_d2tOj9OY1jk_qe1Yd1eiTXIa2eNecJQsmgwuFr4BUQoWy8ndutrCqGucmVIRL0jA6-i-vaGGVRF8uEe0AM" + ] + }, + "Body": 
"eyJpZCI6IjExIiwidG9waWMiOiIvL3B1YnN1Yi5nb29nbGVhcGlzLmNvbS9wcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvdG9waWNzL2dvLXN0b3JhZ2Utbm90aWZpY2F0aW9uLXRlc3QiLCJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJldGFnIjoiMTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm90aWZpY2F0aW9uQ29uZmlncy8xMSIsImtpbmQiOiJzdG9yYWdlI25vdGlmaWNhdGlvbiJ9" + } + }, + { + "ID": "45f44fa6c3668273", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "340" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFWgB9V-ilyDIBP7KEF3SY7uTg-rikYE1ooEgWYkiXb2cW6oOwwtH1W7oe24WU2A9oyJIGOSskz3_icHqmMn7CfPoZQ-Lu5QdtZzv_5062_-UzAt0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIiwiaXRlbXMiOlt7ImlkIjoiMTEiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCIsInBheWxvYWRfZm9ybWF0IjoiTk9ORSIsImV0YWciOiIxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9ub3RpZmljYXRpb25Db25maWdzLzExIiwia2luZCI6InN0b3JhZ2Ujbm90aWZpY2F0aW9uIn1dfQ==" + } + }, + { + "ID": "08c2546e2c262ee7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs/11?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmXr5TZZPRnABz_XbbZpjwU3iPz2e802RkcmNFr3sLzUx205tdmAb6vVWMNlYMUGZ5tGoddlqQ_oPnz0Xdvpz8CiD2eDLMDyrMbxVFjc3BinwBo0" + ] + }, + "Body": "" + } + }, + { + "ID": "efc8b67f9c2f4b40", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UraMBYT56qFvKLe_OPJfpJRhTy2qSpQyRjP6CScOnnijgaQ88eQtVahktX_Vsvy-G7J11d7GNT64FnpqZrewu1sUmuNNLiEtmkqVFILdNcWed6i1w4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "6dd92f9ce3b15a57", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:25 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpedRCcxIOhXCWdipyV2E0R3z00CUlOK6rPlof1gpKuQbLeJmvMoPFn28o8zqmmeVJ5rbX41bB6Hp116-_ISgEXl4Htmc1VS0Aq41lJQiN_mIvozbY" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "914cb60452456134", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2F\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "12632" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpLE4EijIrGqoHRLrCqmprZtxBU2JtHLLsZ-nIakblnuZX8s6FZA1SEaczyybdjB32loN1-gVCf83PFM4x06S24ATKp9-xRgQ9QNC9fwVH_ec4e9JQ" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF/1475599144579000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF","bucket":"gcp-public-data-landsat","generation":"1475599144579000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:04.545Z","updated":"2016-10-04T16:39:04.545Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:04.545Z","size":"74721736","md5Hash":"835L6B5frB0zCB6s22r2Sw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF?generation=1475599144579000&alt=media","crc32c":"934Brg==","etag":"CLjf35bLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF/1475599310042000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF","bucket":"gcp-public-data-landsat","generation":"1475599310042000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:50.002Z","updated":"2016-10-04T16:41:50.002Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:50.002Z","size":"58681228","md5Hash":"BW623xHg15IhV24mbrL+Aw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF?generation=1475599310042000&alt=media","crc32c":"xzV2fg==","etag":"CJDn0uXLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF/1475599319188000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/
LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF","bucket":"gcp-public-data-landsat","generation":"1475599319188000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:59.149Z","updated":"2016-10-04T16:41:59.149Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:59.149Z","size":"56796439","md5Hash":"FOxiyxJXqAflRT8lFnSdOg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF?generation=1475599319188000&alt=media","crc32c":"p/HFVw==","etag":"CKCEgerLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF/1475599161224000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF","bucket":"gcp-public-data-landsat","generation":"1475599161224000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:21.160Z","updated":"2016-10-04T16:39:21.160Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:21.160Z","size":"77149771","md5Hash":"MP22zjOo2Ns0iY4MTPJRwA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF?generation=1475599161224000&alt=media","crc32c":"rI8YRg==","etag":"CMDW157Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF/1475599178435000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF","bucket":"gcp-public-data-landsat","generation":"1475599178435000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:38.376Z","updated":"2016-10-04T16:39:38.376Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:38.376Z","size":"80293687","md5Hash":"vQMiGeDuBg6cr3XsfIEjoQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF?generation=1475599178435000&alt=media","crc32c":"uZBrnA==","etag":"CLiT8qbLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF/1475599194268000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B4.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF","bucket":"gcp-public-data-landsat","generation":"1475599194268000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:54.211Z","updated":"2016-10-04T16:39:54.211Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:54.211Z","size":"84494375","md5Hash":"FWeVA01ZO0+mA+ERFczuhA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC804403420162
59LGN00_B4.TIF?generation=1475599194268000&alt=media","crc32c":"Wes5oQ==","etag":"CODCuK7Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF/1475599202979000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF","bucket":"gcp-public-data-landsat","generation":"1475599202979000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:02.937Z","updated":"2016-10-04T16:40:02.937Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:02.937Z","size":"89318467","md5Hash":"p4oyKHAGo5Ky3Kg1TK1ZQw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF?generation=1475599202979000&alt=media","crc32c":"pTYuuw==","etag":"CLiZzLLLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF/1475599233481000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF","bucket":"gcp-public-data-landsat","generation":"1475599233481000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:33.349Z","updated":"2016-10-04T16:40:33.349Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:33.349Z","size":"89465767","md5Hash":"2Z72GUOKtlgzT9VRSGYXjA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF?generation=1475599233481000&alt=media","crc32c":"INXHbQ==","etag":"CKjykcHLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF/1475599241055000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF","bucket":"gcp-public-data-landsat","generation":"1475599241055000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:41.021Z","updated":"2016-10-04T16:40:41.021Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:41.021Z","size":"86462614","md5Hash":"8gPNQ7QZoF2CNZZ9Emrlog==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF?generation=1475599241055000&alt=media","crc32c":"uwCD+A==","etag":"CJiW4MTLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF/1475599281338000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF","bucket":"gcp-public-data-landsat","generation":"1475599281338000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:21.300Z","upda
ted":"2016-10-04T16:41:21.300Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:21.300Z","size":"318887774","md5Hash":"y795LrUzBwk2tL6PM01cEA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF?generation=1475599281338000&alt=media","crc32c":"Z3+ZhQ==","etag":"CJDt+tfLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF/1475599291425000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF","bucket":"gcp-public-data-landsat","generation":"1475599291425000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:31.361Z","updated":"2016-10-04T16:41:31.361Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:31.361Z","size":"44308205","md5Hash":"5B41E2DBbY52pYPUGVh95g==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF?generation=1475599291425000&alt=media","crc32c":"a0ODQw==","etag":"COjB4tzLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF/1475599327222000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF","bucket":"gcp-public-data-landsat","generation":"1475599327222000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.159Z","updated":"2016-10-04T16:42:07.159Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.159Z","size":"3354719","md5Hash":"zqigvl5Envmi/GLc8yH51A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF?generation=1475599327222000&alt=media","crc32c":"WOBgKA==","etag":"CPCx6+3Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt/1475599327662000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt","bucket":"gcp-public-data-landsat","generation":"1475599327662000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.618Z","updated":"2016-10-04T16:42:07.618Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.618Z","size":"7903","md5Hash":"el/UdDvWR0hfiElvrbBcUQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt?generation=1475599327662000&alt=media","crc32c":"PWBt8g==","etag":"CLCfhu7Lwc8CEAE="}]}" + } + }, + { + "ID": "3f3f45cd2b718e17", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/noauth", + "Header": { + "Accept-Encoding": [ + "gzip" + ], 
+ "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "247" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsZlmkTyf2_fqITeaIiM8s2MLUvz5qFiP_rzA4Mf6Q9LMxsiQeP-GBRwHON_XvnG2qef3XL1EzgTLA_GxtLKZRjdzOBndJPf9XbJa9KjJCIPlducQ" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+QWNjZXNzRGVuaWVkPC9Db2RlPjxNZXNzYWdlPkFjY2VzcyBkZW5pZWQuPC9NZXNzYWdlPjxEZXRhaWxzPkFub255bW91cyBjYWxsZXIgZG9lcyBub3QgaGF2ZSBzdG9yYWdlLm9iamVjdHMuZ2V0IGFjY2VzcyB0byBnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm9hdXRoLjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "110cdd2daefc6162", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoibm9hdXRoIn0K", + "Yg==" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30405" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrGgfg36ZmqdKbc0NqBivwXULVnF2NuBAD31rJqzC5Rj9xgHFbRnwxJCbdxCuUdlAhGW_-H88mBadNGmqch0fmAyAd80HPANBkUmIgkolsL4HK7XAU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth."}}" + } + }, + { + "ID": "848c7013eb1665b1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqbrgn51bDFbwZ1c2_BxCHhog7If9w6ooKAtb2YCerQcObpiFJZqT3-Jn7zTXEEPVuysmxKw4PmvEOmkbCAJVkmbEVZm6z877JKFhrXTrkWqWYooq8" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "a5f818071a0f22da", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrozB-0-kEtkjITLFZA2uQpw77J_Zc6GErrYrIyxkPTWeUHJNBRLw4JXhyIEFG8szc7bhqblQVKoKYiZ4myOcfL5zIM9KYGIbCyAP9e4sEEdx2pBe0" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "55eeb942e5603431", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=1-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7902" + ], + "Content-Range": [ + "bytes 1-7902/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlAytrsQn43Os8JxSOx4C8v9ApqqtqwEE7kZ-voKAcmcYe32lG7ANHxzNrwkqN8bbLLohoAHd88brZDVaC3U6Q01dhBBoeDFnlkCzHKUJjA8ZWrgM" + ] + }, + "Body": "ROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "320dc27adc377057", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=0-17" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "18" + ], + "Content-Range": [ + "bytes 0-17/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovperPufphUvaf55r54Wd-USAbL2ZQVTseICyulStqI633iJcFBLryyqecsHQcoU2cXp4MsKgB8uQu979IXcnv-aGNm6viDydIrqmPqA7SmPElPGI" + ] + }, + "Body": "R1JPVVAgPSBMMV9NRVRBREFU" + } + }, + { + "ID": "a20a4e3b35dfd271", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq7491k1eCc7fe_JApsP1zDcSyo759KmHvh-9YHm9ekpOGaG8v1bZNPjaMEkikJSDYt_LkVMHrb9HTDx9vvDGy3Zm1kPrlxS4933Sw-Wdh35lDomi4" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "d1941f2e08f3bc52", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + 
"User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYIJ-m0VbRdjw7ZGI_0TiIfj6fcuhJkomWPdQGApFxH2_LnekHIdv7igEpJAM3a-zrTOzR20bzvc-JBunTe_-f_Hsyxz_VPxJNrQY7PSrQ3OMfEhQ" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "2451a87df39e1241", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "W/\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "Warning": [ + "214 UploadServer gunzipped" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Response-Body-Transformations": [ + "gunzipped" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNfvi75DBdUL5KpenQbqbsYr5A8YSQp2lGAPIhe4FlijJP95WctTUrdrLIyq3riprP50HzntCuw7zh5ycZfqbJBkFbibeIwEp5bVDj7yVpJbqctiI" + ] + }, + "Body": "aGVsbG8gd29ybGQ=" + } + }, + { + "ID": "f0e47a86731e8924", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Range": [ + "bytes 1-8/31" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upzmwe6UNntf6SxdYLZTz0aZiFJ7UANU6y7I2YKJbADGSVoCRe63OoxcC4uh-9n2JEnkvQgq0dwCHCbQ3qmYG4cWFEovG_fIMKFx6O11iPQUhLmeFw" + ] + }, + "Body": "iwgAAAAAAAA=" + } + }, + { + "ID": "2a14c414736e344d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgiftU7BDllYqI1jhKj6RbZME3LoakRb653ThaSSD_kzX1O5odabBcDFfG-oswR6YOcDZTW174qxbUaa2G1EvajUt_wJAD3ViGgoMwT_YjoTvObkc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC4wMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "b03cb69547c1a6de", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "99" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiR0VUIl0sIm9yaWdpbiI6WyIqIl0sInJlc3BvbnNlSGVhZGVyIjpbInNvbWUtaGVhZGVyIl19XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urw36CL-_RvF58GoVX1bJlTZ1YUUdWXJKQDIac7eTOYj5s65KYFsk2ndonwA5ro_9nGPfo4qRIYyt7J1Xxb01TiJUQPRXzH-z9T_S5UQE6fxOssN0g" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "735da673f622341d", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urb-FeCpjdYOEj6HdkFXRMfVXffmMB8xjLqLwPvjgs3FghXs7r94UrXEpX0rW_CgEpjG6v6iHCWCkwxib31kFkLPMZMMZxHTy66dPT2AdBDsIE8Y8k" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "97ce23a7211781ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqrjU-mu7dGgY6-CuNd9AwgGuKtQ_O589el9eyWSAb54cDYnQN74dl8HxRVzIUayStgIinEuuux2afAlvkUGm9lhrefX8nugBXNL2lGluNcfQKfRo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOS43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "a8f5b1c42d7a3c5f", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "12" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ4q-BeojZ4WgF5ZHNzJG7sIvdOxmprH5GqoQ3DM-QllSzHWOwAfhVdJZH7WmVK5Dfs1TQH-pKgneNLb_9uYM-rDFb_5Hh9vs89o4pOPT9ob9ezTE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2
OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "cab9472f01ed4bd5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo0__TFFuzUsEi6Eck0G1lblGy_1abmyPHERWBavjN2GHncyWWnKQ6yYaFDpDVQ-kfBS15M6H9NoxXUDS0hZ9VX8S9hK4rUGAAFWXznHjOntSK5ldY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "7a196e6b35872d48", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpJhdZl7X4YXxzKdcr7SJ3C6Xadtlo0ixoS8DNO1jaDHs5MeGKu9nx2pHWPpnUWd13HZ8inxG0pMh3un84TucDfyLu4Z-5Gp7Ka3HBfPnt_w_VTiuE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "24b6a3b712ccab17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqe_Q9NPVf3WTO_It8-A42wh0FD3KyF4kopi6posCEX6l28cnowG8O7WSYwETklwvpp0Uk1fCXiVmgwqLlWCSc3ez-nYllKp5m_UVtxup1E1UNadGg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ebed90fad884e1cd", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrjLjhrtR5T2TdsRM6X3HPy_6vnZjhUfpjytCivwknDLKhiMkND-f1qeZ4te6AnGrv_qtnGI4OGajQJaydHJvQZGWpnBQN9VOXHqYjSBTCxnNbcbC4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "31ee7f558375c66a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up70C3kZ6tTqgyRHbTyjXA5pLWMohouKPHCPhexU8UuWw0cAmLxjHcwaa2xBT-Soq-CLyVxoWeEkxFMhRqqK87xaM38Q2hSjGkeXEhmhoZ3iy5s6D8" + ] + }, + "Body": "" + } + }, + { + "ID": "72458186a1839a5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsiYUGF50kaeS-bMsAR51mlDVCwH9HwmqU1hCFSwzrMDgCmGF46ZuQGfqWx4SJprlBWIxaNwT_EQMKQrTQARvFyee5ZOJq21xZUsVEc1iBW0KkiVk" + ] + }, + 
"Body": "" + } + }, + { + "ID": "20fcdf5cb9502a64", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8ALydoTkq1rLUaKiwrh6YEhczTwI3VX8iKuCNoqr6N2BSBvtxD2Qc9De99Svk_4EX82rynlWctsS2F9Ffr4WuDby30_W1Fl7TMWFZGWfo5Wc_nfI" + ] + }, + "Body": "" + } + }, + { + "ID": "ace99589753f3389", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotVHCMZG4hcbTx20gUPBV1LsUxIAVuDym9d21c2In0VVLzBxOM_DAULuLEd9LTMmpNg6A7yW4yqEsnRlHhAaMbpe7MwAWbadF2gJJ2M7uGdA-rcLQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "466aeae01d2d2933", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVuGt1j5ranQuT0nS5bzOIcVM5M42RwIHcF2mKK_H1ctJEqC_djWDMm00OBFXY4yxsTFdQ1ZYjWVnFq8GhUAWUsvsj4_B9Ur8MzJQcbAzG9vHgIjs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "34d8505e483be050", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZWZhdWx0RXZlbnRCYXNlZEhvbGQiOnRydWV9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo5A5V7O5JcpElASqEqUZ_UiEjPa-39PCu3SwJk8EE9cHfyeVL5DWQkAss8k9iRx7YjD4ZC7WnzE_ENz95PuKdccjgzeC2GLFAUzA-sR2iEf-lNil4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "728568d994e275c7", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + 
"gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrOVH8W8OREkTrF0UKlK3dOp_AAbRF7Rsx16Zbjdl5caBObFjXETVORm7o-d-YYcPIjcrx1mKcQwxHCIXpdE5P6baH6WeY3Wc3JVF7UcQnKna-a_U4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ
vd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b7230d8a6b23fd8b", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "35" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uouwpmid1Y6D0uXWBb_WrmlZVHKYnw0RGbreddchrmPzQUE1H_DeclZnbl6Yb2346L4YNt44ZeZWEM2u46h7Zx45lw9rUFxZDQIvlXRn4vT6fW4WQQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ec9da9e78b750503", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq25oUSnJMHsIEARHohSGtkz5vw7RCXuQupRBliEjCXMeivgUIK0y9C3U4yWEJ-_182SvGUQLvo5rXxymcoyahwAEhfe21FJU_M2IijdiWsSv83eow" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ad49d37f07d631a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq02fbr_iko_zBCbJljbMwyW7mOaz1cqR6AWHm2ac3m9BcqrUXNkmRqxk0R2rZW6SAUC_KzbOze-1wUGY2E0g6wbYLUqcb8IOdqN2-ROvYO49uxzjE" + ] + }, + "Body": "" + } + }, + { + "ID": "7b04c14c4e323488", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrJrrmhbKyKRTvA3_mNqzRsaaMOAAlvn8UMwBiOR77-6X6m5InqoXMb4qoollPzgpcFOmchQ54jFNic0xJf-tiGu0tlrzSdbjh5Hzw8Rl3RnJy_jPw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM2LjUwMVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNi41MDFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "ae3e9c648aaff158", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJuYW1lIjoic29tZS1vYmoifQo=", + "X7Xb+/Xtxt2fLTn7y+yBvw==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrJv7oOcHCzf9atnIPo70j3xSldrZtXTb5CmV1iuM4mMPU4hiBgpDqCAdUDI3i9hlR2qRie9qT30AJNshzO5CSsuzoTQ88W255VO8VVumaCSZ0KXZs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "e7a12fdbeb636933", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo-mQ6Bnw1qbAyHbMdumiv5YlTBrFQbHo-9tXn02VufGPTp3_Q8nXXp9VIcdRmzH2CJbhaVLu5fUFYZ6VKgcitBwcE-dCwxWrzaljgyT6zmM7x01ls" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "142eb6c42bc6f90e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrBTG1gAXfMDmjm_clsDlv04E6VRtZUsUKnaVr4bC3AIRXILH2f0UebFf1_SmXPSf6Go8sw7TXti9yO112PAG5QNin3LwaSjRuFDSH_unnMZJwRceM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "12487e4f20538041", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqxyG_OCKjwh7TR9zL9nbOGPP1ttxni9cP68b18nTZ7wZ8WqVE0MAGh1XFphNYZEYr-2kdgNgmqQrOvFiNbg6hJi-RfjCWYg0KHPLKfzyWc-gejszc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "e3a625bb7418cea9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVf0XeyD5sC9KvHEILX2e7x7VyJ9d9xoyrP3NqGFlsMgvK3zgeOEOXDvSpftUSy4opndA74-LNRkmAi8Q_6wc91iC8OWV7X3VR8kENWwclNK30V_U" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "2290ac6bb34b705b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKagfJuJDLVLgIcrGtlV_UXlXLSqfEFj2-knllMBWWLktTv22ZfKoeSo8lY6gSfE1LKlvn87WiyJv75QWpcWdXzvablAMBOAZ_XMe-1D8dKZ5ipHs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "572d50dc9b5ce8a4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6ZmFsc2V9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3194" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrWVb2L8OGfosYWTjk_F-zmyNwltYI93f2tqyP4EkOx7hIRpsSG2iGDFXB7SP7DKn-teczhb9tiBKRM7rYG5vcJ5jVinideFoX44khld5BhtRaQsUc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC45MTlaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSIsImV2ZW50QmFzZWRIb2xkIjpmYWxzZX0=" + } + }, + { + "ID": "bda340f0117c1fb4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotG32AgmOwDD_bLLd_W64JCOZQ2-UgeY_cOZwomlkWb6Vx5JSE54tdsuCPvawuyJ8eNuCf8EM0qXlIjtAyg6QQiuN9t72vHrQLsrSs6WtJBDGZziU" + ] + }, + "Body": "" + } + }, + { + "ID": "9a0b7379af024a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7S22mcdvPtDq_Z9iUbo4v0ApOQXNFgXFNZuacd8yZUq2FhZKco01Z2T4vJSv8s1yZkGXzoUrAuUKnCBx0vZuE-MDGnmTF_YxhQoXobyvAyktswdE" + ] + }, + "Body": "" + } + }, + { + "ID": "affb380cf86f173c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:40 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrF4q2mbuFoVeV9q22libc-FjfB1eDXSMYhNGZNU6sbaMWEKdc-8eXu1gtMLC7Ia9bSRU0oxgpIRR9516KZRcg7o_VGdrU1lliVPuSY9rYa4OsJymE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjA4MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC4wODFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "88123486ecd710ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJuYW1lIjoic29tZS1vYmoifQo=", + "cGCusp668ZMwY59j94Srfg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrixhGvSHn6UzWmshHjh8Ny2NE4LW-gcPPW4dCm9dZTDfBwtENYMJra88jZlphQFcyabnQwTegZW9_6bL8KYNOsyYQYgdtEDvFFMQeJ9B_IXQbrsgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "042e51e8b073dc93", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTH44ERv_v2A5OmhnLVBFOkQ_bPdeDI-MNir5xjrZt1ybrczlk9GwseHZ-kt566XrMnuYGTaCkr4ImF__dpbt98CqMpmYkiJaR1vKcI1DC7o3Dbl8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ed898d656784527a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "83" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urh-jh9w3q8AC6TmG1NF_6JgV-Js74mV6EkB4ccrlZ1ZCQ9TlA4AhJ-J6rkqtJ9fKiy4YjT_l-TN6dQhnCwZvTUqoaJC-W-z6QQZywuQ1uX9a90PhU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9ddea19a0c44c449", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoZVnUhVRVPu1unMjxNHQzZrlqTsVtPMZHc_YPTkvy18AAD3d_TOg9qTNAEv5NXUC5n0XF1sPedifaWR19a5w7dCMltRYfuJUfGxaMzyrvpqKbGjx4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9b6c4955472bdb51", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoybPBMseeVzkBPlzTObhT-eKnJ9TbsC15btCmXKMDvqgD5Uz4JclrgL9lBqMq09UjP8GL2_3zCDkfA0gJ8SHCDZbHEi1XYcuAUT7hmXazBL3IIMqc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "0227238a512229dc", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6PS1r6NaUKQ9158DD3cKWHqyParXLdcW8k7sToMskeTU6BjpajWENp3qNHqLMFvX9NrK78TIw31h_ANYkltj3vAQoYq2RG5C6ZB2LCvUIch96SMo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "7a81946424736215", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjpmYWxzZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1klBHpEl6qb-hfnVYr2KJLmo_3_1fgqlMYMBpBt23b-U9pF7bPcNEiJaMtrGEhZPQb9-QnqHiU8qPfmpNpSdy9KR41FwZm-89gyu3EtE5gq3J9-0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi43MjFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSIsInRlbXBvcmFyeUhvbGQiOmZhbHNlfQ==" + } + }, + { + "ID": "7865e37b40b9ec66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2nfd81k4cBaY8ZFRGIu8G6DHyGUVAdIJCB65E8ZqqO9ADJIV4K22sQaOA8fqGL3jyi3tIszjBVsXHOFrCgca1vLXSpA1-3s2cn-MVhTKpzZ5tzY4" + ] + }, + "Body": "" + } + }, + { + "ID": "fd9104ab049c0173", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMqIYvjYuUxhnSLyoY-p5BYUiuZIX7cs418asOvABObfrQc8eXZpo-V4LSZVWWgt4CNoviMvFwcdC9sCTKCZU1H-9Ifcuf8lptTMZpiWMJjZfD-DY" + ] + }, + "Body": "" + } + }, + { + "ID": "e735515a80a67931", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "105" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjM2MDAifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "573" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqxx-CzGd9qn-QCqeE_iYOkeO93A0IpWx1voocTxhTrdkkBJonYBIWXeaEq3g-LNThw7j85IKVotWyBNZlHzqYSgBchb0DeXXetMqS8WBkY603Sa60" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0My44MDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "94bfaea89e1f47c2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJuYW1lIjoic29tZS1vYmoifQo=", + "29UJUEl2/QvM9FnDBFnPRA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLSIzBtq8Qn_3f51_hlfoIkcpamafGwB3U8er3K_mzANwxXyscBHWZhaxb2-3M2wOT6GZER6-zLAtdeeSNtSlUMHkHX77gocrZGYj103HA-AGjVSM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "7db6d4640ce21307", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUK3qzYL0xUcMKfWD6mmDZqRfnd8q7O14gBrKng7OYalNXZR3ZegpD1VyqjzW6bgQqFvLrtFjWXwPoJ4E78_nThYByR2n3Hm33ms2fvCdaRSOy1Qc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "cec800ec28205dab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAm9jtf7nMSv2Bbn_pFGaLCmlq2CtqUdGeEOyYrkllqiADBk9_xsJS3BufmzfGIdARRHN7jVwbeHKev2CHMyzWoGA8UxNGNIldPCC4oZ1dMPARKHk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NS4yMjZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "c787f6d5b23123a3", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgP5ghUeBxvXks4XlillwzLwfwyRGxt5y-FImGHSwZ-Meht6M0-keE0YTUG2Qy5hH5i1gpWPyGHXweFDyrk8a3QbDPnG5shDEoxzNOlMEsX6V8-4I" + ] + }, + "Body": "" + } + }, + { + "ID": "144726e64e401a23", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxuxvNJ3APW11H4y-qweAzn3ChIZ0eRjSetoSHkGwVvO7nMR17h5Hbj8I8zgOGjR7HPA201lDCw47ejD6kj1wCz3ZTIrUz-r-qjdm0vGGszQYFidQ" + ] + }, + "Body": "" + } + }, + { + "ID": "eebfbd37f51a5cc6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2Uox0foiLUu9KuEF-KVU8lIN_yUKAJsYEuwVqgSXvzFRGmzJtSZvFeNvMDS8W0aQ7c8YoI89qS_MxU8zBSBqcljYpIZe1NPIvkEFEG5K22RJkAuwmh8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "c7ddc04b5d29113a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpD4zsQLTflaR7g-z0FszOHvw5oXvxqdiyjlAnMwMcBcUEjidrpIGgpvmrz1EWi2AyWkdDCSY0l8ItQxYnWwSZt24jOxXBlzq-gCNrCCE6j0fJVGKw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e08a38dca4dda51f", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrsphQdkHT0ctCFLyPV1JRkq7cyQ7w1tvYmAWv-6tYsuThaMN-vkUJoTNz2gPKGy6jV0yqdbXaAlRlD_5n6GiQa-ULOyKLi-C0q8y25lfk7L6zROE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "fe586a68284be237", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAjkPxULicnoY5wPnfrg-D7yzTbTzTZcUShEN8JSsqVMN758lI4DlkvFibNK9URhYVBH0xclTheAOO_CEDoVb5egvcF-WJqJ3SDisU2YPaZsohRro" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "732c3421adfb99d8", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": 
[ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohfaTh8JFEjT7TTMwV7NkrcAsgkdpDyovvbjHJLNbXalzOWX3_HwhowBimaqZXhkuvBx1D38zsUZ5dEzigoyoXRJtgHfy7ClUrshBL_yMfPkZ4JE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMt
NDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "aebe46d0a31a6e95", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uop7c4AxllIcwFmLfqhIIaGP3Q9uHfmQoAm9LttwjQIvzHiJvu4fg_Q3UfNP8YbDuP97jLsG78JbCK68eSQv8ZHPp8kpzfdJcvOER0cs75wQ9gC7NA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "bea48b3667650bdd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1p1nEuvoUAqYHYa8wuU6qF64gN2JPCxLqqFEUiKXLgo9rwPH_btm63155v5hbuS6sow8cYEvsXVJthsJ1Zn8qfb05USodp1EvTTrt_m1rpJnIxdo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "a951cae4048b1267", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZAHhwxqSSY5MYFK1fdLLirRH41rc7W2YWSnn4CQZkbdR_98fRYjddtqZVfpEizqsZ1BNa5A7ENxQz7BwQSamASc8XAC5gyZp_pwi-Yu8l0XLSCpU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "e7f0506408261db3", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotOOFJlGACBcheGxpS8qxLlGsDT0xOhT_Cg1M_6DxUI16IiOu-YHaPvwzxilOKOrTvL8kRzcVUiNuFeS6rQExTmdK70I1IOH2d2BLOrMm1L5Int1g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "b2c4589e9b9b5ad2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:50 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFdLKiYfqS-J3PXjvHGxpXtJwVnUB7Li8XrR-DTYkmyed-ZlUmUcmZFgGyIayuACtUWVYBcRbhYOKvgkfl8srE0OdJbc15OHWBttYqenraJ0MdXj8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "836baafb4e98da80", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpNuySlLo4SanUdFJK3SGoCidNgWEW4mUuFDGMLLSzY-UUA3TJtN5_FLoLq7et7d1YSUDlOoGZY4a_GdewGQtsUwGdbHLz-bopObpzPy160Wm3UMpg" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "281ffb73e66efd1f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uo8PK9JysFiApLjDSbbo9GloKqKjLO8vr85RWGkeC_Q0iEQf7zu2mwQVeiuJ81pfdJCUja0HrhNG13sczI2XoflqlsWNFvdxHGjCBrvDObr-s6Y32U" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "515ecb8616d48a7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + 
], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqF93J_Bpwg-_yefVpKWteSyWBSx9FqNm8MP5eZKqVfvDWpSF6EPZsISss7hNOkKMM5OuV8stlJIW2UAkzsSgkYOHx5fCwOwylbtqAyb8OlYJCHz6Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "771e55d617425f26", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6Slob_akPo2UWNmzCe-LD4cb-9DVTq_FPlNxA7mKVF6fBFks6gFnZ83BSajEU2qD8g14Y-pX20kbfoKf8XGR9pIW2xXH_oHCxsjspHZ2ARGePTMA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "9526aab2be8b214c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UopNhdzHbHv4sXyk0r1owz7bkkPQwYIGbb3gRingbq7vgESKltWLIr9PDDIUNREfumEvZDfpxMineWYNzZ0qdOaI1QNYqXjnuxO6sCUDjBYEhr76uk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "0aabff8aa1cc1e9c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTj6zYv1NMHDI_ktA1d-p7HKq4YnJMxxI9nXubPfCMyIspOgya8HYibmvOMFwLtCPagnzRHe0HbmTCSqJpYMay0xYZstGSE_IUWRqI9lUhzq0Iqe4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "05147b5b5fdd7882", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + 
"Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoshhz3RuOTlNgZ3gOLis_dSFBsqr7khyJCdWUD8BGk0_j-AHxA5etttVHuHTrUcDksF4LdtfJLhlVrAJe9PrGiYZ-Wy57j6tb7L6vc0qHoTvolIRg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "7de8d497e7e1a76c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3mjbvLxwRDDhjg8GTGBNcJM7EraDydPsmD77oQcyYYGVlQnUqg0NQbezf4KQOoOHoEnC9jMR052e0olW3qi9KdAi4-qJEiKGI1rGivb5hnJnmU5o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "3876040740117531", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGdmh-XzrU5d5VwRugwWr452hENxpWf1sEtNCDaGksLJ1taCgnEjL_0oAlELqirqyXLlw-frE8LYo1V37Pk1G3qvWGk9E1iSZV7uECOjsdE3JagK8" + ] + }, + "Body": "" + } + }, + { + "ID": "39fb456e528c7081", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKcAQsTdqHfiXj71c5kivJTwCNhY6xUQpZtaxywsmr1kNG5_MdJBsOvuLVbDSHKRIsG9tDxk_9Gz-7zaqP_pqke1aqKOFg9NeCU4ZqfjD0SBy0Q5Q" + ] + }, + "Body": "" + } + }, + { + "ID": "3fdacef865a6fc39", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur0PIkQTUvOl8-6tC08mwiS3nBPydo1OYUeD6Q46ZrU6VkmpRwxc9MikGqJlsejuGZkyY_aaxUVKYGREjOF363tfkbII3of6pGFuQ8rINOf8uFV5ts" + ] + }, + "Body": "" + } + }, + { + "ID": "2da48fa864dd9efe", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWkbW_2n8KvRkdk7Lh_5eOVBjW6JvoW0i9LNQUHM-Df2H1QIbUYIwFnUGLt-bPfFa46Ur-v8Q76mU24KpjW7WIC_HVq4Q5H96pRYk7I6ggUsckTs0" + ] + }, + "Body": "" + } + }, + { + "ID": "3ef26f9548e0aec4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqZfOfnhYy6nk2TxHz3h1eMu_0KlczCaOb-xcAIBRoRgxk7Q67BqsT5fvb-vAky2phYzQWNHLTk-0T8V2tpPY4Hg23Ub9evuo6_YEndEVdiBRhTcFI" + ] + }, + "Body": "" + } + }, + { + "ID": "8f3abbeca307cc5a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqdSQK1TuFGAgs143LV6RLNjzfrsAl5vt-Jb_RQ4dcVGDzK8I29sKKrPmcY0rLIJuVIQar3mdRGhNLhqXp2xCKli28TFq4rtsIgiA4DxCtdVbajIhY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "bb92a1c4ecd9e884", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "aGVsbG8gd29ybGQ=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3315" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "COup9/Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRoCa6t1nTWVUrdfIXfyxZBI6PKBkzO24svoJRDEYMEsduIrzBpdDiX8mIUWSxcL8-pSRZgcDvy9_iHKpoVbiUNt4QhlG7zosGpa3WOk--YlBxjeM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk1OTk1MjYxOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS45NTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NTkuOTUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5Ljk1MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoiWHJZN3UrQWU3dENUeXlLN2oxck53dz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk1OTk1MjYxOSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvc29tZS1vYmplY3QvMTU1NjgzNTk1OTk1MjYxOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InlaUmxxZz09IiwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0iLCJyZXRlbnRpb25FeHBpcmF0aW9uVGltZSI6IjIwMTktMDUtMDNUMjM6MjU6NTkuOTUyWiJ9" + } + }, + { + "ID": "8f2ac73db961f1d6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13884" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok63dRFSkGv0Fum7yPhHf_AtYTWceX5_zkck648-jhWkyym1ezg2Og-LqKJ6nnZCW_gPZuU01OkZErKAQbL7mvQroCq8nEtNTEAosMxk9fJRKpYlc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: 
com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, unnamedArguments=[]}, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, reason=forbidden, rpcCode=403} Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00"}}" + } + }, + { + "ID": "5a7a0988789dfb17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + 
], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZIBPbBTYIUYYaBFrcXzNnp7uuC8DoPry45bX57PyfL7aM5a83NLxYnvWsXD62w3LrRVksNhJbrLnSL5e8ZS6b0Cto8ufh-V00gNrGI-CaTc4achc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMC43NTZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQ
U5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "8410bda61aa0b6fd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2MOVFr2ZwvkwYm32qtqndRBhJnBc28nVBEBowKjIIxpOxiYcSef4wtz9sLTJ23PbXr1F8L_TfGwyShv5j1eTsQ-91OXU8Q8JEZtui9VVyK96j2D4" + ] + }, + "Body": "" + } + }, + { + "ID": "7b024f33e16d850f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpDkcsqcqPLSySgf13o9jWNafbKdVjSRqGNu3ZedvF7TlL9TBswdMMpcl0zQOHtvXWIrhrcZODr7RjRhHuvS8gJdMbnYeQDHrBCbiH6x9WrnaZ5uIQ" + ] + }, + "Body": "" + } + }, + { + "ID": "33b0efc1b91cfb42", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": 
[ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpItvsNld0b1FGsjQgaVCDxW22UEra8vRGfkWtbDS7R2kMbHRPr0I6iPjzaQBOaa9-xUJxGO-JV6RhMlWwH2NY-FXY2-0lrkZiAzIL-tdFwkCwlF-Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "632b9a0387d9cff1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2520" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEVtrO8ed8TEHdpzyw857ZykloGKTT56atylmGnu682Y0VxI_T6SWN0JLFupJ_2Cu80ntiZttFSCBqwqFGfxVrRqsNnf0KBDyzqOH8eI-BaWPBXvE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "a32a5924f876577b", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "639" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsYL8RxAKClLw1KRdSSIORQHYKKf_HMml2vtwDOFrh0kTYDzhGEgX2RJAPQKOP2-70q7ZZ2VfkkGz12tmTiRgSuwB2dM3xxZxZucASROVeoit9Shg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjY6MDEuOTA4WiIsImlzTG9ja2VkIjp0cnVlfSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "718775f2f8320cfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2536" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprHupyIuGJCYeUCPKigfkARZRkCc2Jz051KFrXK7YS0FbdKxNe3r20jon1ejkooHsAtcXQOi1tDV41Ob9dJ3Zoyj7stDImJ0eQriMO0WLLQ__-V54" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJpc0xvY2tlZCI6dHJ1ZX0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "2f4b1104fa94954b", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13774" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrafLpm39Fg6hBYYi9lSEizIWUdMduEDQykOeT9Bch6uf9BNQUO9sOAnWpTklSpNac1yW_kSNj-JR8LMmVLirC2kqG2DcQdE-Hg5dtVBkVZ4Xhjlc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 19 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., unnamedArguments=[]}, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., reason=forbidden, rpcCode=403} Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'."}}" + } + }, + { + "ID": "a8964565736a4805", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UrNbiNCNtu8DhS-gNXfoe4R9DFKEVzknZ0g6YoI0JdvwH6P199oVJ2oaJwh9WXMte6frhDr_vJo1_PlxLhezMnUTJzMYZgPlPWdU1QN9imk3hXgawE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "49648eb4e4e2ddda", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UplUHmGCg4wOKtpKVz2KH0cn-th8s-KXQR62bOkAF1pV1zIZJvsoI_tURcFttkyHks8g05Hln5fT4Ej_O-Lx6JCTovYYwhhHIK7AAcialHMj5fPHk4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "42cf12714cf13289", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrcjWbo0pc_i_GQTU2WE6YR0XhWYMGRPJutu1MQXo6XbRKJAYhzNK4p2-_gCqZ6BvNGncUP1UZNoTrXsFFGt1RRTRR3DS9XNiws0gt9YQdp_I8dqqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "4f0fdb3eb607f33b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Up0EsbUXQesAgz95sFt6OzWRDj5WAXqEpYaqHgGvCClYnBGvjX8eQmor80UQOCIDzT6v9ZAtOPVhFIqkJG6rV0-CDXiLAnr4gwDrPkqgezvm8PQTaY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "a5a8b8af13be21cf", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:15 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" 
+ ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpF7Lqxvm1dNC6q_v81ZB1Ui9c2ZLJ3GXLX6Y6UNZN1nbkF78c0J3NwXavHF1cMFZ5oP8ufCnmCLg9vQHM6tfs2ih8YxQSfE4D3AxGsvBaGNP3VUMo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE1LjA5N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "e1dbaeed8c6d1ffb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=0\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12155" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4dRXLSTXYf9VXr3eOCMfuLBwDLvNQMXwIYjwEuSFQO_p-SxZQXQJ7I_VA0T5Hn8gnnEZIqLV3vLCnurXr0cyHDEMpCdxWOiIiqsXlgkbcLKf78to" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "814dffcceacb3bd8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026kmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upis-8CwJYvr7glFAMnec_3T3YDojfTz3O_uICK5tQQmfJF2_rFtNnbqopcxBL5L3aKyoYe4zOLoqN4_MFFrhYKDm6Tmrl3rytuh0ymlCV15VZKyrQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI
29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "9b05110f2b59351b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "\"-CMGB+Prx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:16 GMT" + ], + "X-Goog-Generation": [ + "1556835976741057" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ6aSiGPoeZKANPoMPgXhL1QD5nbILvzv8v3sOMId95Y65wOPssqesNA1MXzp86JA4Qa5gAMxxx5cnY4z3-tCUnvsKoIHn3NXOK2U6ZSRLzYNeQeU" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "011b81453e7060ac", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq5t8aexqBqqxnLJFK9mWmGdVrFfd3gaCGOKfGJMO1phB3ATnxt67-cLVpELRy1gNnjNOK7-zMzWCR6yV1v5PQcv3lccINA--Q92RoECpvBON-QKtc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvc
HJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "0dbd0a37c735be56", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur_RD6un7NLXQpzwRBFEfZdp982qhfEzyvw8f_P1Lx1wKv8sUid99lQ7Nba5Rs73IJfv3qHXjQfUDHjWclvCAcS91nMboiexn-FwiMrUuKqCQ_d__4" + ] + }, + "Body": "" + } + }, + { + "ID": "1b81fc72e201f2a2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + 
"eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY3NlayJ9Cg==", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CKCxsfvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRTb3RtqG1gIL-ENbVZC8nWKRHapQP78uVzv4yyL2t7N8cA5jEBWa014iJALicWO06KthvAMPLctgZCJ2UvPqo72IO4_LheEfTr13-_h43M-roUyc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jc2VrLzE1NTY4MzU5Nzc2ODEwNTYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrIiwibmFtZSI6ImNzZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNy42ODBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTcuNjgwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE3LjY4MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlaz9nZW5lcmF0aW9uPTE1NTY4MzU5Nzc2ODEwNTYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NzZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1
iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NzZWsvMTU1NjgzNTk3NzY4MTA1Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJJbzRsbk9QVStFVGhPMFgwbnE3bU5FWEIxcld4WnNCSTRMMzdwQm15ZkRjPSJ9fQ==" + } + }, + { + "ID": "6da27052d0dabd22", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026destinationKmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3372" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKmOV156VWN4edUkrNip1urAj8WTpDcgGTS520rVGilFT71KrZLZQxUMKloxtxKx5R6V9pUvIqYym68P8lQ8RSc5rEE_D1KlH-NyPgybiPERbxYZ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiOSIsIm9iamVjdFNpemUiOiI5IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21layIsIm5hbWUiOiJjbWVrIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwic2l6ZSI6IjkiLCJtZDVIYXNoIjoiQUFQUVM0NlRybk1ZbnFpS0FiYWd0UT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWs/Z2VuZXJhdGlvbj0xNTU2ODM1OTc4MTI2NDc3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN
0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiVUk3ODVBPT0iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSIsImttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MS9jcnlwdG9LZXlWZXJzaW9ucy8xIn19" + } + }, + { + "ID": "6ff8a4be1ed6bda3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/cmek", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "\"-CI3JzPvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:18 GMT" + ], + "X-Goog-Generation": [ + "1556835978126477" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDR7RHLf_CQraIBM5Z3vCUxSJndE0E6FBOLFBqmYvMntnrQyVIfxwuTE6BtHL4b8oQszU6rIycc72JMUjiGRM_XoGdjsk-WhsbBmL_3c3x5H1s4bs" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "51f1995364e644ed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3271" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "CI3JzPvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM1hbkOXC-oSxvoqjP8s2MpxREJEpzVp2TTYj-6B8467MLYSA-yrWs9UoGwRGQUCJTR76e-qAmQnbzGsFGT31Kc3qjevb8SM0MynLHL9GhOMkm72w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrIiwibmFtZSI6ImNtZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21laz9nZW5lcmF0aW9uPTE1NTY4MzU5NzgxMjY0NzcmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwM
i04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDSTNKelB2eC9lRUNFQUU9Iiwia21zS2V5TmFtZSI6InByb2plY3RzL2Rla2xlcmstc2FuZGJveC9sb2NhdGlvbnMvZ2xvYmFsL2tleVJpbmdzL2dvLWludGVncmF0aW9uLXRlc3QvY3J5cHRvS2V5cy9rZXkxL2NyeXB0b0tleVZlcnNpb25zLzEifQ==" + } + }, + { + "ID": "8972a840dc3d95a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmNCzrFh_7cFaryet35nLbKqM6dyo7nvsyZApHIj6fLKcMDb9ZA-_TRNjGOLNFjjoD0IZ1YOz9P-ftNchBFkjWDTaFzBjbHmryCc9JInU3wd_DLw" + ] + }, + "Body": "" + } + }, + { + "ID": "1e28b278fe3e0a66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ 
+ "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrX5OILXadm6r5e8fLyuLEhQncM5jrmmDaOesSqraquvQbXXKhTqNJ63RYU3qj90M3FLjwnAOz8oOBTQQDOq2sRqUkbpBbJzYj82Rp3Rd9VLpNns0" + ] + }, + "Body": "" + } + }, + { + "ID": "d1ac5d123ee4bae6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "200" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwibG9jYXRpb24iOiJVUyIsIm5hbWUiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "609" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpbajYTiCBBelYuwcqOs7_O-SyJbcV6WGwYhC4JCMmeImLlFLb93JyaiYrSp-201iYZTIdQ2VViFR_Emxe2Z7b4jV_G-0o2xzkqnkSnz0-Qcw_q0Wc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImVuY3J5cHRpb24iOnsiZGVmYXVsdEttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MSJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "d035cdb991d0fabb", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoi5SKd4TdRXg-INW-U3pKcPA2ioBWIc2zdC5W3J4efS9iZ0EKFNqprjQ-NGEK_3wSDGezk4Al3Dd4JovGxXSnH73rIw22S_R__XRrpTQmYpTiNFGQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3
RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "672cb2bcc1ed4ee2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upyst1aH69iaN9J4055oX7H2yReN5TADIX11Vul8KakNuYZbJ7yXm1GBdvCDVB-IPTvbhzELJWgfurnIHxGpzKJszLO70n58xMnVbSLsHX1J3W02Lw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbG
Vyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "c037ecafa6249395", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0019/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "\"-CLHMt/zx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Generation": [ + "1556835979879985" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrQOFSgCkxAaKVPAQhz0ohMN6irPryraHLjWDEQgV3dNEwc5cxqcDDRChzx6_HR1Z_NPlm3vmjFCJcNK-2rAz-tmvVS8w36Q5U9Lc2wAGUgL_joX4" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "fc3eda648104f042", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHPSbutJRXX-6i05QnH_4UizrPft0fsGc3_ISDNmXQ4C3RPbEAnNfS1Ren0j3k2z8zxk9AV1WZQCWiKG9D5A8gZAZESFxbSZyjbBH8jkLEsEX54NQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "13283bc1ba20f019", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2b3OtHWqpsRWxurpcGgrEtM10ZDdsrIa2z0nFsO-P4G3RAGYMTVMR0MCtwLDePrKyhfnsK-d9T5AIcRpeaIe8Z40Qm7gkZP4Fdi9jXd6ceYe5Iwc" + ] + }, + "Body": "" + } + }, + { + "ID": "ca2255586aa4aeab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "126" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur05CMGbsLFprbwWYFSeE3I5dKFdFUzO--ImyAzkLt6ueNGpMDviyX6c1S4F6t1bqXTSnyQXXQzgugu8JoecrL3-1eJOPsqnN-OBMubntJCblkyWqE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9i
YWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e1c336910a43425f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcwbg5Z9C0rFvSTWYj4FxF27ooV0028Cm-yme-BrfmvK0dWsRkfRQEMWylWwokcmM2kIvLKEiucJlvDLHvnbJPB_HjH3zF2Lm8xCwV_9gPN42QDeo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y
2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "581a002b653f595d", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "20" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjpudWxsfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upv2LawTqc_eDVGR_hv2jSCyEA77H33uMkPevVxUUDKj1u4a9IWOH2f1oMXpU52-49Jg17SUSoJUJai1Ueff3ahN58GC9FCxwJt5v5xOHI3SytFjpo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMi4xMjRaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBTT0ifQ==" + } + }, + { + "ID": "a0db97c641f3f17c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo15eoN1OrQFM6eqxJcuNFl7Wfr6x_VvJ0ENBlhhC_Hedf5al2OplGnNslWnRQT9RVs92RVLevZ_F_ohutgnNsUbK1PVsR04UnQ_A6ea3C0ZLy8FkI" + ] + }, + "Body": "" + } + }, + { + "ID": "ca84fc3a71a55ea8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026predefinedAcl=authenticatedRead\u0026predefinedDefaultObjectAcl=publicRead\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnCCRKNh19fdBAL3pyCzbtLnG7z20pSwSZKl9uTerD6o6-B5mB79mqZRIbXNcUmcpTdpxTreKjvsHK3Y9EKq_5fkzBPInDj8YkjbGViIpd1ldSots" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ed54d82686b4d390", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpXJh1_aAMuDVjYxLZJdwqAt7OwZdmTkc0_5wf_LshhNm_So0q87o7gJ31T58vp1b0_CucSecAyyyOLUbYTDrWDDTZGMkZBpfx1iBArjRtBcud1IgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "224aee5425ffb86e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026predefinedAcl=private\u0026predefinedDefaultObjectAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "33" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOltdLCJkZWZhdWx0T2JqZWN0QWNsIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1113" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:24 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoknWPZdEFnvKZGCGO_Z26ZaKeXNdBzc6k6qqlbGdQXVEtPvLPR2oYtKCXsqDgFBjMw4wfWVXwDfmcc5u5kBAxshpuzKGgCp8UOxv-i_Rml56mzrOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC40MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6ImFsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b6a80796e20baf03", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o?alt=json\u0026predefinedAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJuYW1lIjoicHJpdmF0ZSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1995" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMfGdFW2xElqOJcP4q5XeDjVehJr4FmjcFPij-Y31f_168_Jgb6zibcWxCOJzrV1aUYYTXZW9DMX-BcvfJi6YXoBCPpN23EhrGQyY7tHSb0SVurGY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjQuODQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL3ByaXZhdGUvMTU1NjgzNTk4NDg0ODIwOC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJwcml2YXRlIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODQ4NDgyMDgiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNORHE1djd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "4812f1e0e907256e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026predefinedAcl=private\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1529" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTjsMMIlet4CWUmGoB3TuGzhTHXlDwuUe1SAQO8H2vmbTo2QYYV_WozfDO2NyZpsA9LCtgwwthwxeEeEG182WQTTe3xLWnsVjqPZx-iho8JAgI66U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuNDIzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFJPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTkRxNXY3eC9lRUNFQUk9In0=" + } + }, + { + "ID": "969763dc18755620", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private/rewriteTo/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026destinationPredefinedAcl=publicRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2017" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNk9xKdyuh-I4RkIXqQxMF4A82DZ9b2HYZOdVK7ELOsnMYBqOmrqsY5hzZJsLXMZCZ6zOSzCeefQZuTgYHwyd-jAttJ4QoA1lDk0HTwZqigIfpjCc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9kc3QiLCJuYW1lIjoiZHN0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODU4NTAyNjkiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW47IGNoYXJzZXQ9dXRmLTgiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuODQ5WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI1Ljg0OVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNS44NDlaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdD9nZW5lcmF0aW9uPTE1NTY4MzU5ODU4NTAyNjkmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2RzdC8xNTU2ODM1OTg1ODUwMjY5L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vZHN0L2FjbC9hbGxVc2VycyIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKMy9vLy94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "8db7e2a6b620e794", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp/compose?alt=json\u0026destinationPredefinedAcl=authenticatedRead\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "130" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6InByaXZhdGUifSx7Im5hbWUiOiJkc3QifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1906" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Etag": [ + "CJGwwP/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpqjCYxNFNTu3V1H_kV5WJxkbEYE9I3H5fYjfGMPO7n6C9NJstYZGcIh6xVA5RAxMCwP1PdzXITgTI1m2vm5xP5nhzLVRHKhder2qzYFGsAooH-kG0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9jb21wLzE1NTY4MzU5ODYzMTUyODEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wIiwibmFtZSI6ImNvbXAiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NjMxNTI4MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNi4zMTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjYuMzE0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI2LjMxNFoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vY29tcD9nZW5lcmF0aW9uPTE1NTY4MzU5ODYzMTUyODEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvY29tcC8xNTU2ODM1OTg2MzE1MjgxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoiY29tcCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg2MzE1MjgxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0pHd3dQL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2NvbXAvMTU1NjgzNTk4NjMxNTI4MS9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzN
DAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJjb21wIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODYzMTUyODEiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "72a0ce58c513f6d5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpWOQtHF0cKNME8sTEm9vbS9KKKLLK9afxka1b6TTVGxDjCQ4qAdnA9zshsfVFVKK5hNS10aABAQog_gjYCZTXbbsF4bQNBM0i4R7bK2r7saocPtFo" + ] + }, + "Body": "" + } + }, + { + "ID": "111b5a296b726631", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqPsYXABoYOnGueMXiIvH9AntIPEf4xc6i18Nnr_XEYBxy-3LSK9klIcNyfSB7we4lgbctsiYA2e2WWBJlfqk1GnK6YA2fj-e3IiPbKwBwTSLjr1Jk" + ] + }, + "Body": "" + } + }, + { + "ID": "27e4065cd2000d43", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + 
"quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMPK_b_vcRkSIj1t37L47as9idfpbrNWqzirdNBLwoaGDMPlbjatMzjLThnYRNwk9CqTNQ5H8eoKFbunpe5TMktUo_FHyI7rryoUhbCpkbytRrTeM" + ] + }, + "Body": "" + } + }, + { + "ID": "cccb109f5fba2a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVAiJOaePqfycNhiwbkPOptOqgpe9Tvo0jo1aW2nviozj4-QcjTtnGGyhcdrZ8QTxT-KHPo-saYS9ESC_6osYhChJOCi8b9nXtuqH0BqlvywXs9FE" + ] + }, + "Body": "" + } + }, + { + "ID": "cfab6af30981f62b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/projects/deklerk-sandbox/serviceAccount?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDBTBzZwm7GH6Jklv6hzuaZaO3JdgliJk8zLZkVNDqwDO7tYP-c06rzrTj03llkWuV_l1k9w2tbUcGG2Rtv-1MdozY6pktTutDMLfyhyHe1DaEIys" + ] + }, + "Body": "eyJlbWFpbF9hZGRyZXNzIjoic2VydmljZS00OTYxNjk2MDE3MTRAZ3MtcHJvamVjdC1hY2NvdW50cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImtpbmQiOiJzdG9yYWdlI3NlcnZpY2VBY2NvdW50In0=" + } + }, + { + "ID": "677e783b9aea8c15", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "vcVTjdWWssp9XFk0D3Nk5w==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdWRt6oMzDM98VpA0VDmWRuNvox2ZBw6ZpjbyOFDKCFKgJ92jsLkF8v4M4UUxaTUVpmr8iElUTld7Q2g5_MZSbRatDE2zVgaY-769Ppq7Nx6f0HAg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZ
WxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ff3c05fe02cd38ac", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/some-object", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "\"b4769c7d229f0720beffe7251bbabe8f\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:27:28 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:28 GMT" + ], + "X-Goog-Generation": [ + "1556835988273530" + ], + "X-Goog-Hash": [ + "crc32c=Sm1gKw==", + "md5=tHacfSKfByC+/+clG7q+jw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + 
"X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn6PsKCJsyZAhI_JtSOcVKs7SWuqYsoprQTUagoZXqu1CHWyBc1TzWfUyF9iv5VBS4iZPP5qz0Lf3b51p8O36Sud2-QS2zDg9FKhtm1uBFKLpcTOs" + ] + }, + "Body": "vcVTjdWWssp9XFk0D3Nk5w==" + } + }, + { + "ID": "d1c7cc7c0e1d2829", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqluzdkA1sUAJQJZodwWmM7SP7tNoX2JJjDiqHTy3XtjEqiVDNpI0whWTFJJH4caozRYjJPjTb6_ywm82dnAiYEo-YCknhlvkWexfTq1BM9MRWwQeg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "951f74cbe6b30b67", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoMRpk7F0qXPvUV4uNPdV7VhfUO2Mm-qPBbx3VV92y1wRyElPF1kbRNEGJ1KqbIW5w2XozD7IzjxBx4KevqcPxZr0_E1iU1_NASedJ4lZj4EFoP9UI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "aadc7916a3fc24ad", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11805" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhoAeZtt8ffiWaPr6J0LEaVR3vjerqtJ7gMM9BObOkz_6MtctDuS_s8-DZRxtTZsGfMDtekIKG2v8p2_sb3MxvTgeLhgO4vqvJNIszTaMjjyQcEdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=null, unnamedArguments=[]}, location=entity.resource_id.name, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "03b45b7eac939177", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Caf%C3%A9", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"ade43306cb39336d630e101af5fb51b4\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385878535828" + ], + "X-Goog-Hash": [ + "crc32c=fN3yZg==", + "md5=reQzBss5M21jDhAa9ftRtA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFU2KyS5OA-bAy_MovHXHg5zJZ2kznc6tgTTrSjilrNHiw0NimPHZApcRXOS_ejrSSy8nIn02xipxUWxvHM3ru5IYhJRTeBZxhlq6_XQVsATkvink" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEM=" + } + }, + { + "ID": "725883eeccd4b42c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVybyJ9Cg==", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "CKyp9oDy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UruPh9tBADr1LSQWFlj1C4tjlS6uwlcy_MVRYaTGD4e_qEUlq0iXJx6ns3JHbH7I9olH7aSJpDW6VQcV5EdLwjQMecgA7qKhxzLoYFW4BZdEXbUrro" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLzE1NTY4MzU5ODkyOTYzMDAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvIiwibmFtZSI6Inplcm8iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOS4yOTVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjkuMjk1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI5LjI5NVoiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVybz9nZW5lcmF0aW9uPTE1NTY4MzU5ODkyOTYzMDAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8vMTU1NjgzNTk4OTI5NjMwMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJBQUFBQUE9PSIsImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0=" + } + }, + { + "ID": "036f06773f6d6306", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11821" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqs9uKpqdPwYRv420tAk0KmBT6DS2POPsIj39ROT-r3ymrd2SB-oOJEXuaHGOfih1ckOqp-YrvHHAh7l7A-6rwlFjoBSb4qCtWzXGPjekN0pUBIok8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.bucket, message=null, unnamedArguments=[]}, location=entity.bucket, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "6b9dc540a02ed56f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Cafe%CC%81", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"df597679bac7c6150429ad80a1a05680\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385877705600" + ], + "X-Goog-Hash": [ + "crc32c=qBeWjQ==", + "md5=31l2ebrHxhUEKa2AoaBWgA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrK69XFh4eDvl0RvSevMYIwMVs-nlDelMhoacS-4faSvuEateXMEqzdMvHClWdzkyqcEk7IQ1N0JMcv5uEPt6V98chYK-p9otm8puF4Uf846EBUEmg" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEQ=" + } + }, + { + "ID": "09f71762da168c2f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/zero", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"d41d8cd98f00b204e9800998ecf8427e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:29 GMT" + ], + "X-Goog-Generation": [ + "1556835989296300" + ], + "X-Goog-Hash": [ + "crc32c=AAAAAA==", + "md5=1B2M2Y8AsgTpgAmY7PhCfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "0" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrL3cJhuZXF-arxiYeo4MSf1to_dAoiyMisikOum5rqnrSnFa0OZ25Sl8hUU7lcIeW6P3-dwXK6qv0yHGFYMFnUB0VYpI9BRT16gMo8ikQ_L3gBmew" + ] + }, + "Body": "" + } + }, + { + "ID": "681729281517b9c0", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCNWRWyBsgMgWxsrSjXZMKW22GWVttfUWHw_UNSF7X7hnnz4h-cD7vFzaLO7OVWvnuMyW9qpJAA4eJaERT9hSjGfv41XVw_9ouvBOeIK9ykq-DrE" + ] + }, + "Body": "" + } + }, + { + "ID": "39261fa7e6e9aede", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEt-jSVBZqHHBwfNkeP4NXxMdC1ISsFp2E_M6JiSw9rDj2gTnmTZhrvOn-bo_f9Lx_HMxMDgsXfO0ftWqvUjGwdmHAYNnYK5JSJozLfuosPNxv_S4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "3e70965b0aa1111b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrB-4r560VjwZdR64GS_7_mZDefc-iM5ma_H8KjYfrdNXt-IlkgvzShsy7iNLseN6coOnkJwWT5tLbP54_M7nMUjord_jmpb1i19SBRA_3RQ_64yys" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b80f9a99661cd572", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4ItFRDN2_vB0_Ej095pAE061aywdVAoU_00t3fazm21R5ZLxUHpQL4ifTJ58218EkOeDBSWNaOaWuuzTnTM0DhXTe4l2X3pytOdfcZdIeyLV1rWA" + ] + }, + "Body": "" + } + }, + { + "ID": "979ffb3f3450c3d5", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqM_YYHcsYolLFJJwWhO9AUYmnc3Vp3J6C5AydYOy-0RVAmBxObG8QpwZOsn28vDVllo_OZ1-4bASi_RGi4wdply2CPhzUOCVjP11XNzVYZ4_EiFb4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "4931edd6fbd7e278", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxOq-rTQ-0CFxAKezuPpbibtEmD_yASKzhW5dOiUdwVORx6a7_e1dmDnNCdamDPwLsBOpLueQD8iDbwlracb2uxGwwZ6yz92QD-t-JSKM-jSNp2xA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "12c06b5419a1fa81", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc24sLEBwU00YKpSYT4V6iJu3WxME2LgbyRWaYj4kX8s6Gjatzl0AWcCwPRhj_Bq0BOoUhKQzS8L0GJlhHC36eyEWEIs2rk2BcUyo0aE3U8XbuHuM" + ] + }, + "Body": "" + } + }, + { + "ID": "bf24dffbe6cb8609", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqVkn03LSqmt8RiOPdW3JO4dpsS9vRp7VK8Qc0HwhmS3L9KDOgmyZIqLnvAwuAyvoIz1VsbqeoLwMMtz9pCIX216P4K3OpEs-x_bf_VSwMiFehgZiM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "2e41eac5e693f3e1", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "64746" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgGJECa6hRJfdjE3ErHERxS3HPU_SALrzjf-HIDuv4lfxMvOwrkF2pfY2jUJ3b8TcIjWcnaa85Rs4LBucKffiSWfFXcfLZMF0EuiVRj9oH6Kp4tOg" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1","name":"acl1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864248170","metageneration":"2","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.247Z","updated":"2019-05-02T22:24:25.629Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.247Z","size":"16","md5Hash":"0E9tFNpZj0/WKOJ6fV9paw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?generation=1556835864248170&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COr+pcXx/eECEAI="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"FiDmVg==","etag":"COr+pcXx/eECEAI="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2","name":"acl2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864737946","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.737Z","updated":"2019-05-02T22:24:24.737Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.737Z","size":"16","md5Hash":"c9+O/rg24HTFBc+etWjefg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?generation=1556835864737946&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJrxw8Xx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AtNRtA==","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs","name":"bucketInCopyAttrs","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883663856","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.663Z","updated":"2019-05-02T22:24:43.663Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:43.663Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?generation=1556835883663856&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPCDx87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CPCDx87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object","name":"checksum-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835855962241","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:15.961Z","updated":"2019-05-02T22:24:15.961Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:15.961Z","size":"10","md5Hash":"/F4DjTilcDIIVEHn/nAQsA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?generation=1556835855962241&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CIGhrMHx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Vsu0gA==","etag":"CIGhrMHx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1","name":"composed1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835858962957","metageneration":"1","timeCreated":"2019-05-02T22:24:18.962Z","updated":"2019-05-02T22:24:18.962Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:18.962Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?ge
neration=1556835858962957&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CI2048Lx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CI2048Lx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2","name":"composed2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835859564799","metageneration":"1","contentType":"text/json","timeCreated":"2019-05-02T22:24:19.564Z","updated":"2019-05-02T22:24:19.564Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:19.564Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?generation=1556835859564799&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-8
0633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CP+RiMPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content","name":"content","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835874352207","metageneration":"1","contentType":"image/jpeg","timeCreated":"2019-05-02T22:24:34.351Z","updated":"2019-05-02T22:24:34.351Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:34.351Z","size":"54","md5Hash":"N8p8/s9FwdAAnlvr/lEAjQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?generation=1556835874352207&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CM/Yjsrx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"GoUbsQ==","etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption","name":"customer-encryption","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835875058680","metageneration":"3","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:35.058Z","updated":"2019-05-02T22:24:36.225Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:35.058Z","size":"11","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?generation=1556835875058680&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPjnucrx/eECEAM="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"etag":"CPjnucrx/eECEAM=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2","name":"customer-encryption-2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835880717506","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:40.717Z","updated":"2019-05-02T22:24:40.717Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:40.717Z","size":"11","md5Hash":"xwWNFa0VdXPmlAwrlcAJcg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?generation=1556835880717506&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CMKZk83x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"r0NGrg==","etag":"CMKZk83x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3","name":"customer-encryption-3","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835879859440","metageneration":"1","timeCreated":"2019-05-02T22:24:39.859Z","updated":"2019-05-02T22:24:39.859Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:39.859Z",
"size":"22","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?generation=1556835879859440&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPDp3szx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"componentCount":2,"etag":"CPDp3szx/eECEAE=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test","name":"gzip-test","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835860251867","metageneration":"1","contentType":"application/x-gzip","timeCreated":"2019-05-02T22:24:20.251Z","updated":"2019-05-02T22:24:20.251Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:20.251Z","size":"27","md5Hash":"OtCw+aRRIRqKGFAEOax+qw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integra
tion-test-20190502-80633403432013-0001/o/gzip-test?generation=1556835860251867&alt=media","contentEncoding":"gzip","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNuJssPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"9DhwBA==","etag":"CNuJssPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1","name":"hashesOnUpload-1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835885051680","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:45.051Z","updated":"2019-05-02T22:24:45.051Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:45.051Z","size":"27","md5Hash":"ofZjGlcXPJiGOAfKFbJl1Q==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?generation=1556835885051680&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CKDem8/x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"cH+A+w==","etag":"CKDem8/x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://ww
w.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"4","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:17.121Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/allUsers","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"allUsers","role":"READER","etag":"CLnS+bvx/eECEAQ="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc","name":"posc","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835882760607","metageneration":"1","timeCreated":"2019-05-02T22:24:42.760Z","updated":"2019-05-02T22:24:42.760Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:42.760Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?generation=1556835882760607&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-owners-496169601714","role":"OWN
ER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ/zj87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CJ/zj87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2","name":"posc2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883152770","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.152Z","updated":"2019-05-02T22:24:43.152Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:43.152Z","size":"3","md5Hash":"9WGq9u8L8U1CCLtGpMyzrQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?generation=1556835883152770&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CILrp87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"17qABQ==","etag":"CILrp87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL","name":"signedURL","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835861141206","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:21.140Z","updated":"2019-05-02T22:24:21.140Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:21.140Z","size":"29","md5Hash":"Jyxvgwm9n2MsrGTMPbMeYA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?generation=1556835861141206&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNat6MPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"ZTqALw==","etag":"CNat6MPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object","name":"some-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835988273530","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:26:28.273Z","updated":"2019-05-02T22:26:28.273Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:26:28.273Z","size":"16","md5Hash":"tHacfSKfByC+/+clG7q+jw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?generation=1556835988273530&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPryt4Dy/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Sm1gKw==","etag":"CPryt4Dy/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object","name":"zero-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835856537016","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:16.536Z","updated":"2019-05-02T22:24:16.536Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:16.536Z","size":"0","md5Hash":"1B2M2Y8AsgTpgAmY7PhCfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?generation=1556835856537016&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLirz8Hx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AAAAAA==","etag":"CLirz8Hx/eECEAE="}]}" + } + }, + { + "ID": "744cbe6970c9623b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + 
"application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfI-4jHQ7K_XJuo8cMIfOvPLKerfuQA1KlRDBi4Fq11Uc9i--oKNuFL_MbH0CCxK8PeI4r4fmZILF9hdSGAMMpMmyX0w67j7t84C4tRDx2LnbzG4I" + ] + }, + "Body": "" + } + }, + { + "ID": "a2bea52380600211", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrzTcVhZWJuBZITVX_K6haiVlOGb5fEYRdiiH8ODa4XsVSPl4FCDs7zPpKMTfoZA1veIMd4ccdcj8rUqWDMcRfDU5l4cOGvNhGZV-h9S2AVJfl6GdE" + ] + }, + "Body": "" + } + }, + { + "ID": "e187777c49170a6d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UouPwITheFghtKlzs2vxoAGnEm_V7OZZnYkk_0pLhV8g5YN6TPRS2n1h0iAtzkuFXYtjovp3ifqt_351LO6-CobCar6VgRnJ-_EQ9WgOWhIC9DzYTs" + ] + }, + "Body": "" + } + }, + { + "ID": "653fa745ed683364", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoBLZLK-ePmvYxO2ByvMQ1dXb_4-5yhOR-1nYkxjkINZf5fY9ptO2H4RfRtbIrRQ-QQNZHqZGgCOGJakKLkNgVI2_ExobQLkJ-JaJ-ViDWL9GZCEgU" + ] + }, + "Body": "" + } + }, + { + "ID": "91230665f80c46f7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upasg8dCwbL7dWntUy3O1t_wWVeDNNT3YCz1uVNL8QUNh4z75y0p9OfL57wFX4CUv3NV06Oi9f7a85m42BzgDZl-kqmv4com_INYmRYjKbwrTdFB8A" + ] + }, + "Body": "" + } + }, + { + "ID": "dc68e1f94de4fa2a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqXdUtFVSSFIrK72haTC8pKLogl9pB4lDLpARc8oDKdNt1yfbHj4VAQH0M_Doqj1mLY2LVmThFxvG_1mvtVBm-N4x_J-c5Izq46lZh_xx3wOQEYZKQ" + ] + }, + "Body": "" + } + }, + { + "ID": "db5b5c31ef8e503e", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2nVuDhUpL1-5n7rzvboeHuOBlvh-59eoy59Y5ZIyvUy6FXYj2eCphVTBFkaIPXK7XIS6DJXkPzZABjiFi4hMA7Ewdgp860sYu44qguzhTfOymXYY" + ] + }, + "Body": "" + } + }, + { + "ID": "f44103066254972d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqul73HMDr4p6XCqdfKz99RILwd68VT50DnynneOl5m5E_a7ky2mbMA8mdM6xyAWCQtiJm8dCxvIuKYtjLavA8JUlkGIqqCLGfLbB2EvWahd55Fw_A" + ] + }, + "Body": "" + } + }, + { + "ID": "99aa9c86d4998414", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqfk5JfeEeFo80cy1Y-OxXgzSIbt_x7qDWML2yJopIrt9DsPmRq4cDVFqg6sUFStj6aMrjxP7iu_tbwZzS_3BtnwqfPrJZvHllZ7tdeFTXLmto742o" + ] + }, + "Body": "" + } + }, + { + "ID": "24bd829460fb4df6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprF0LO3CObAApP8iwUO6IvC-Yh-5whtr0F_mMzaj0rltEl4C-AZjNuBMGuQUue-v9oBqb-nQU2ykIenSZ3UYnK4XVV7moFIS43VyP7pByYqsHPkMo" + ] + }, + "Body": "" + } + }, + { + "ID": "acdf7531eddfd72c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAEWwZD_lXrdbrjMg-PYbmLP1-zSzjESAs-osetXJrFTueJ6ufxVfA3xkMG5KjtEz-68YryfKl9gAGsgfseX6H5COwltvyW6pJSdxDdA-dTV_S8eU" + ] + }, + "Body": "" + } + }, + { + "ID": "8facd7ede0dc71a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], 
+ "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrInkaBVPO1ip6mFSpF0Nihd1wYE9HNfIz8HfFsZkiEVG1mX7eKMItsbmNwWMehWZ0qaKAf081imDi7DmKqs6mEy7vyTokMNXVUp5LrFoQ-yDfVRuc" + ] + }, + "Body": "" + } + }, + { + "ID": "6affac5f0a8dea5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhbUYzZ-vUqGcyfzGFaQrr89Q05cXGCB0EbWA2Dy8dmQxB3uWJ7_FUwi6Ish-VMpOrZv78bT9qAHd5Tr5b3s8bTCX6QR9HlelvxkKdyrUv2G8QQuY" + ] + }, + "Body": "" + } + }, + { + "ID": "8258a6e1b5dc73aa", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upola6CIBa9ONjl53PxMpFGQkHvYEau96KG2mgCmUDWDhxZKCw0T58QQRx8OIJW5sisH_acrPcg2o-LLdJgOxCtlVSw-t6wGisVL2-TSopEuezgmu4" + ] + }, + "Body": "" + } + }, + { + "ID": "12186a37ecd09333", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFxLaOTSsPe7hDbzBznGCoZvAUmeNgWBwS45X2j3rpLlk8_MIFFuCa-aBpAM83d23ixudKpklnhiqCcrQQotCUZhkrMLpWLHBJdmcM0d5rCKL6opY" + ] + }, + "Body": "" + } + }, + { + "ID": "e7a92beca78e4ec6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrpITV4ogt1-_eCsp0KFz7tVeM2A4FzSevs8Q0BPZ36nEPrP9aAaB4nfWASzLZgi2hNM1l9eFkf5UbYYWDT5J-nQFCapVNcm_4Nr43yd94Ce95SVps" + ] + }, + "Body": "" + } + }, + { + "ID": "5bb0506f3ca9135d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFBt86Wz7Oqelsb0pPpIskBYdMYLR3XuBmD2F5DwyXt84Q1AUyL0Q94YtqTJP1yzgohHCkJWSGibqUARgP8Ilv9v4lVipsdfjSdFHgIjFJ1H-vuU4" + ] + }, + "Body": "" + } + }, + { + "ID": "3ea05ac204d7a112", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?alt=json\u0026prettyPrint=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2mLLaTTO-0iqVf4m8WXPEsP-mHlaS6hz_gavjGI5yzU0jCjBNDRw1gj6Wa_cQIBzfjytwP2XYhBIJeAglwa6Uqv87WhzeqgGXt4u820FuZKVgY-w" + ] + }, + "Body": "" + } + }, + { + "ID": "bed8580dc40e850e", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UodR7nshMeihBskPB6loBZoNDRFEUxojhFCKhQ0NpiEMRHY0ZHU8ncI4Sr7FHUmIDXyK8xVg_I7Qt5ESrqnGmRzW0h1eM8OGfEM-3weHyzMqN-VMik" + ] + }, + "Body": "" + } + }, + { + "ID": "57489f1d9de0ddb8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqxQ_NSeD-rrAbZXERIaDr3_1wDC_RZ4MyuFZnOhdDqLkn4qub7rAIamqSQKDD8-jbn0c3XmxvowTghxhq4tZaxxHTmYJtB_FPvJxRZtCt3Leugx4g" + ] + }, + "Body": "" + } + }, + { + "ID": "006b3176688c0ac8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4l8LfaJI78MH37sa7nOmY2A8EB4paZccrewnIFFud-pwGLeVf067ZiRaBAOAPsPYlaxxtsr6gtECA6UY0xiUBHPTLFOe4VNnNQpniNWYJEfZ1-I8" + ] + }, + "Body": "" + } + }, + { + "ID": "a853ce25e734dbfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026pageToken=\u0026prefix=go-integration-test\u0026prettyPrint=false\u0026project=deklerk-sandbox\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "47300" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnXhEY-kWgvl-XqP1kXnVDpQphHD9znE-EyBBlFT-kHZtXEsAW5QG5PuzlaJPfQUwZqnTHP0wxu6YDbRCgDMB31UR760_lfEiX88PtVKIdTgCWm5A" + ] + }, + "Body": 
"{"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"go-integration-test-20190502-66597705133146-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66597705133146-0001","timeCreated":"2019-05-02T18:29:58.601Z","updated":"2019-05-02T18:29:58.601Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66611506907602-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66611506907602-0001","timeCreated":"2019-05-02T18:30:12.264Z","updated":"2019-05-02T18:30:12.264Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-editors-496169601714","bu
cket":"go-integration-test-20190502-66611506907602-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66633965420818-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66633965420818-0001","timeCreated":"2019-05-02T18:30:34.662Z","updated":"2019-05-02T18:30:34.662Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projec
tNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-78294113514519-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-78294113514519-0001","timeCreated":"2019-05-02T21:44:54.960Z","updated":"2019-05-02T21:44:54.960Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0017","timeCreated":"2019-05-02T22:05:34.375Z","updated":"2019-05-02T22:05:36.341Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79
385133382825-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:34.375Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0018","timeCreated":"2019-05-02T22:05:37.733Z","updated":"2019-05-02T22:05:37.733Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE=
"},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:37.733Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79911595924903-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79911595924903-0001","timeCreated":"2019-05-02T22:11:52.358Z","updated":"2019-05-02T22:11:52.358Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0001","timeCreated":"2019-05-02T22:12:09.706Z","updated":"2019-05-02T22:13:41.634Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-editors-496169601714","selfLink":"https:/
/www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"l1":"v2","new":"new"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0017","timeCreated":"2019-05-02T22:14:17.486Z","updated":"2019-05-02T22:14:18.905Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/ac
l/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:17.486Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0018","timeCreated":"2019-05-02T22:14:20.105Z","updated":"2019-05-02T22:14:20.105Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:20.105Z"},"storageClass":"STANDARD","etag":"CAE="},{
"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0001","timeCreated":"2019-05-02T22:18:47.398Z","updated":"2019-05-02T22:18:47.398Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0002","timeCreated":"2019-05-02T22:18:48.114Z","updated":"2019-05-02T22:18:48.114Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-8
0326589403446-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0001","timeCreated":"2019-05-02T22:18:54.701Z","updated":"2019-05-02T22:18:54.701Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewe
rs"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0002","timeCreated":"2019-05-02T22:18:55.293Z","updated":"2019-05-02T22:18:55.293Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0001","timeCreated":"2019-05-02T22:20:46.897Z","updated":"2019-05-02T22:22:07.429Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-owners
-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"new":"new","l1":"v2"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0017","timeCreated":"2019-05-02T22:22:50.428Z","updated":"2019-05-02T22:22:51.935Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="}
,{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:50.428Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0018","timeCreated":"2019-05-02T22:22:52.962Z","updated":"2019-05-02T22:22:52.962Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:52.962Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0017","timeCreated":"2019-05-02T22:26:01.908Z","updated":"2019-05-02T22:26:03.544Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","i
d":"go-integration-test-20190502-80633403432013-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:01.908Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0018","timeCreated":"2019-05-02T22:26:15.097Z","updated":"2019-05-02T22:26:15.097Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.
com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:15.097Z"},"storageClass":"STANDARD","etag":"CAE="}]}" + } + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 000000000..43a0f0d10 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,282 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/trace" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + // SendCRC32C specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Writer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data written does not match the checksum, + // the write will be rejected. + // + // Note: SendCRC32C must be set to true BEFORE the first call to + // Writer.Write() in order to send the checksum. If it is set after that + // point, the checksum will be ignored. + SendCRC32C bool + + // ChunkSize controls the maximum number of bytes of the object that the + // Writer will attempt to send to the server in a single request. Objects + // smaller than the size will be sent in a single request, while larger + // objects will be split over multiple requests. The value will be rounded up + // to the nearest multiple of 256K. The default ChunkSize is 16MiB. + // + // Each Writer will internally allocate a buffer of size ChunkSize. This is + // used to buffer input data and allow for the input to be sent again if a + // request must be retried. 
+ // + // If you upload small objects (< 16MiB), you should set ChunkSize + // to a value slightly larger than the objects' sizes to avoid memory bloat. + // This is especially important if you are uploading many small objects + // concurrently. See + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size + // for more information about performance trade-offs related to ChunkSize. + // + // If ChunkSize is set to zero, chunking will be disabled and the object will + // be uploaded in a single request without the use of a buffer. This will + // further reduce memory used during uploads, but will also prevent the writer + // from retrying in case of a transient error from the server or resuming an + // upload that fails midway through, since the buffer is required in order to + // retry the failed request. + // + // ChunkSize must be set before the first Write call. + ChunkSize int + + // ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk + // resumable uploads. + // + // For uploads of larger files, the Writer will attempt to retry if the + // request to upload a particular chunk fails with a transient error. + // If a single chunk has been attempting to upload for longer than this + // deadline and the request fails, it will no longer be retried, and the error + // will be returned to the caller. This is only applicable for files which are + // large enough to require a multi-chunk resumable upload. The default value + // is 32s. Users may want to pick a longer deadline if they are using larger + // values for ChunkSize or if they expect to have a slow or unreliable + // internet connection. + // + // To set a deadline on the entire upload, use context timeout or + // cancellation. + ChunkRetryDeadline time.Duration + + // ForceEmptyContentType is an optional parameter that is used to disable + // auto-detection of Content-Type. By default, if a blank Content-Type + // is provided, then gax.DetermineContentType is called to sniff the type. + ForceEmptyContentType bool + + // ProgressFunc can be used to monitor the progress of a large write + // operation. If ProgressFunc is not nil and writing requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), + // then ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far. + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(int64) + + ctx context.Context + o *ObjectHandle + + opened bool + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + obj *ObjectAttrs + + mu sync.Mutex + err error +} + +// Write appends to w. It implements the io.Writer interface. +// +// Since writes happen asynchronously, Write may return a nil +// error even though the write failed (or will fail). Always +// use the error returned from Writer.Close to determine if +// the upload was successful. +// +// Writes will be retried on transient errors from the server, unless +// Writer.ChunkSize has been set to zero. 
+func (w *Writer) Write(p []byte) (n int, err error) { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + if werr != nil { + return 0, werr + } + if !w.opened { + if err := w.openWriter(); err != nil { + return 0, err + } + } + n, err = w.pw.Write(p) + if err != nil { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + // Preserve existing functionality that when context is canceled, Write will return + // context.Canceled instead of "io: read/write on closed pipe". This hides the + // pipe implementation detail from users and makes Write seem as though it's an RPC. + if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) { + return n, werr + } + } + return n, err +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.openWriter(); err != nil { + return err + } + } + + // Closing either the read or write causes the entire pipe to close. + if err := w.pw.Close(); err != nil { + return err + } + + <-w.donec + w.mu.Lock() + defer w.mu.Unlock() + trace.EndSpan(w.ctx, w.err) + return w.err +} + +func (w *Writer) openWriter() (err error) { + if err := w.validateWriteAttrs(); err != nil { + return err + } + if w.o.gen != defaultGen { + return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) + } + + isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) + opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) + params := &openWriterParams{ + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + forceEmptyContentType: w.ForceEmptyContentType, + } + if err := w.ctx.Err(); err != nil { + return err // short-circuit + } + w.pw, err = w.o.c.tc.OpenWriter(params, opts...) + if err != nil { + return err + } + w.opened = true + go w.monitorCancel() + + return nil +} + +// monitorCancel is intended to be used as a background goroutine. It monitors the +// context, and when it observes that the context has been canceled, it manually +// closes things that do not take a context. +func (w *Writer) monitorCancel() { + select { + case <-w.ctx.Done(): + w.mu.Lock() + werr := w.ctx.Err() + w.err = werr + w.mu.Unlock() + + // Closing either the read or write causes the entire pipe to close. + w.CloseWithError(werr) + case <-w.donec: + } +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +// +// Deprecated: cancel the context passed to NewWriter instead. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} + +func (w *Writer) validateWriteAttrs() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). 
+ if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + return nil +} + +// progress is a convenience wrapper that reports write progress to the Writer +// ProgressFunc if it is set and progress is non-zero. +func (w *Writer) progress(p int64) { + if w.ProgressFunc != nil && p != 0 { + w.ProgressFunc(p) + } +} + +// error acquires the Writer's lock, sets the Writer's err to the given error, +// then relinquishes the lock. +func (w *Writer) error(err error) { + w.mu.Lock() + w.err = err + w.mu.Unlock() +} diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md new file mode 100644 index 000000000..78bb35b3b --- /dev/null +++ b/vendor/cloud.google.com/go/testing.md @@ -0,0 +1,237 @@ +# Testing Code that depends on Go Client Libraries + +The Go client libraries generated as a part of `cloud.google.com/go` all take +the approach of returning concrete types instead of interfaces. That way, new +fields and methods can be added to the libraries without breaking users. This +document will go over some patterns that can be used to test code that depends +on the Go client libraries. + +## Testing gRPC services using fakes + +*Note*: You can see the full +[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake). + +The clients found in `cloud.google.com/go` are gRPC based, with a couple of +notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage) +and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients. +Interactions with gRPC services can be faked by serving up your own in-memory +server within your test. One benefit of using this approach is that you don’t +need to define an interface in your runtime code; you can keep using +concrete struct types. You instead define a fake server in your test code. 
For +example, take a look at the following function: + +```go +import ( + "context" + "fmt" + "log" + "os" + + translate "cloud.google.com/go/translate/apiv3" + "github.com/googleapis/gax-go/v2" + translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" +) + +func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) { + ctx := context.Background() + log.Printf("Translating %q to %q", text, targetLang) + req := &translatepb.TranslateTextRequest{ + Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")), + TargetLanguageCode: "en-US", + Contents: []string{text}, + } + resp, err := client.TranslateText(ctx, req) + if err != nil { + return "", fmt.Errorf("unable to translate text: %v", err) + } + translations := resp.GetTranslations() + if len(translations) != 1 { + return "", fmt.Errorf("expected only one result, got %d", len(translations)) + } + return translations[0].TranslatedText, nil +} +``` + +Here is an example of what a fake server implementation would look like for +faking the interactions above: + +```go +import ( + "context" + + translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" +) + +type fakeTranslationServer struct { + translatepb.UnimplementedTranslationServiceServer +} + +func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) { + resp := &translatepb.TranslateTextResponse{ + Translations: []*translatepb.Translation{ + &translatepb.Translation{ + TranslatedText: "Hello World", + }, + }, + } + return resp, nil +} +``` + +All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto) +contains a similar `package.UnimplementedFooServer` type that is useful for +creating fakes. By embedding the unimplemented server in the +`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server +exposes. Then, by providing our own `fakeTranslationServer.TranslateText` +method you can “override” the default unimplemented behavior of the one RPC that +you would like to be faked. + +The test itself does require a little bit of setup: start up a `net.Listener`, +register the server, and tell the client library to call the server: + +```go +import ( + "context" + "net" + "testing" + + translate "cloud.google.com/go/translate/apiv3" + "google.golang.org/api/option" + translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +func TestTranslateTextWithConcreteClient(t *testing.T) { + ctx := context.Background() + + // Setup the fake server. + fakeTranslationServer := &fakeTranslationServer{} + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatal(err) + } + gsrv := grpc.NewServer() + translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer) + fakeServerAddr := l.Addr().String() + go func() { + if err := gsrv.Serve(l); err != nil { + panic(err) + } + }() + + // Create a client. + client, err := translate.NewTranslationClient(ctx, + option.WithEndpoint(fakeServerAddr), + option.WithoutAuthentication(), + option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())), + ) + if err != nil { + t.Fatal(err) + } + + // Run the test. 
+ text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US") + if err != nil { + t.Fatal(err) + } + if text != "Hello World" { + t.Fatalf("got %q, want Hello World", text) + } +} +``` + +## Testing using mocks + +*Note*: You can see the full +[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock). + +When mocking code you need to work with interfaces. Let’s create an interface +for the `cloud.google.com/go/translate/apiv3` client used in the +`TranslateTextWithConcreteClient` function mentioned in the previous section. +The `translate.Client` has over a dozen methods but this code only uses one of +them. Here is an interface that satisfies the interactions of the +`translate.Client` in this function. + +```go +type TranslationClient interface { + TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) +} +``` + +Now that we have an interface that satisfies the method being used we can +rewrite the function signature to take the interface instead of the concrete +type. + +```go +func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) { +// ... +} +``` + +This allows a real `translate.Client` to be passed to the method in production +and for a mock implementation to be passed in during testing. This pattern can +be applied to any Go code, not just `cloud.google.com/go`. This is because +interfaces in Go are implicitly satisfied. Structs in the client libraries can +implicitly implement interfaces defined in your codebase. Let’s take a look at +what it might look like to define a lightweight mock for the `TranslationClient` +interface. + +```go +import ( + "context" + "testing" + + "github.com/googleapis/gax-go/v2" + translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" +) + +type mockClient struct{} + +func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) { + resp := &translatepb.TranslateTextResponse{ + Translations: []*translatepb.Translation{ + &translatepb.Translation{ + TranslatedText: "Hello World", + }, + }, + } + return resp, nil +} + +func TestTranslateTextWithAbstractClient(t *testing.T) { + client := &mockClient{} + text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US") + if err != nil { + t.Fatal(err) + } + if text != "Hello World" { + t.Fatalf("got %q, want Hello World", text) + } +} +``` + +If you prefer to not write your own mocks there are mocking frameworks such as +[golang/mock](https://github.com/golang/mock) which can generate mocks for you +from an interface. As a word of caution though, try to not +[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html). + +## Testing using emulators + +Some of the client libraries provided in `cloud.google.com/go` support running +against a service emulator. The concept is similar to that of using fakes, +mentioned above, but the server is managed for you. You just need to start it up +and instruct the client library to talk to the emulator by setting a service +specific emulator environment variable. 
Current services/environment-variables +are: + +- bigtable: `BIGTABLE_EMULATOR_HOST` +- datastore: `DATASTORE_EMULATOR_HOST` +- firestore: `FIRESTORE_EMULATOR_HOST` +- pubsub: `PUBSUB_EMULATOR_HOST` +- spanner: `SPANNER_EMULATOR_HOST` +- storage: `STORAGE_EMULATOR_HOST` + - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud. + +For more information on emulators please refer to the +[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators). diff --git a/vendor/filippo.io/age/.gitattributes b/vendor/filippo.io/age/.gitattributes new file mode 100644 index 000000000..bb7ec5ecf --- /dev/null +++ b/vendor/filippo.io/age/.gitattributes @@ -0,0 +1,2 @@ +*.age binary +testdata/testkit/* binary diff --git a/vendor/filippo.io/age/AUTHORS b/vendor/filippo.io/age/AUTHORS new file mode 100644 index 000000000..1fbcfc4c2 --- /dev/null +++ b/vendor/filippo.io/age/AUTHORS @@ -0,0 +1,6 @@ +# This is the official list of age authors for copyright purposes. +# To be included, send a change adding the individual or company +# who owns a contribution's copyright. + +Google LLC +Filippo Valsorda diff --git a/vendor/filippo.io/age/LICENSE b/vendor/filippo.io/age/LICENSE new file mode 100644 index 000000000..3b50c9893 --- /dev/null +++ b/vendor/filippo.io/age/LICENSE @@ -0,0 +1,27 @@ +Copyright 2019 The age Authors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of the age project nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/filippo.io/age/README.md b/vendor/filippo.io/age/README.md new file mode 100644 index 000000000..b2ccbd05d --- /dev/null +++ b/vendor/filippo.io/age/README.md @@ -0,0 +1,271 @@ +

+*The age logo, a wireframe of St. Peters dome in Rome, with the text: age, file encryption*

+ +[![Go Reference](https://pkg.go.dev/badge/filippo.io/age.svg)](https://pkg.go.dev/filippo.io/age) +[![man page]()](https://filippo.io/age/age.1) +[![C2SP specification](https://img.shields.io/badge/%C2%A7%23-specification-blueviolet)](https://age-encryption.org/v1) + +age is a simple, modern and secure file encryption tool, format, and Go library. + +It features small explicit keys, no config options, and UNIX-style composability. + +``` +$ age-keygen -o key.txt +Public key: age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p +$ tar cvz ~/data | age -r age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p > data.tar.gz.age +$ age --decrypt -i key.txt data.tar.gz.age > data.tar.gz +``` + +📜 The format specification is at [age-encryption.org/v1](https://age-encryption.org/v1). age was designed by [@Benjojo12](https://twitter.com/Benjojo12) and [@FiloSottile](https://twitter.com/FiloSottile). + +📬 Follow the maintenance of this project by subscribing to [Maintainer Dispatches](https://filippo.io/newsletter)! + +🦀 An alternative interoperable Rust implementation is available at [github.com/str4d/rage](https://github.com/str4d/rage). + +🔑 Hardware PIV tokens such as YubiKeys are supported through the [age-plugin-yubikey](https://github.com/str4d/age-plugin-yubikey) plugin. + +✨ For more plugins, implementations, tools, and integrations, check out the [awesome age](https://github.com/FiloSottile/awesome-age) list. + +💬 The author pronounces it `[aɡe̞]` [with a hard *g*](https://translate.google.com/?sl=it&text=aghe), like GIF, and is always spelled lowercase. + +## Installation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Environment | Install command |
+|---|---|
+| Homebrew (macOS or Linux) | `brew install age` |
+| MacPorts | `port install age` |
+| Alpine Linux v3.15+ | `apk add age` |
+| Arch Linux | `pacman -S age` |
+| Debian 12+ (Bookworm) | `apt install age` |
+| Debian 11 (Bullseye) | `apt install age/bullseye-backports` (enable backports for age v1.0.0+) |
+| Fedora 33+ | `dnf install age` |
+| Gentoo Linux | `emerge app-crypt/age` |
+| NixOS / Nix | `nix-env -i age` |
+| openSUSE Tumbleweed | `zypper install age` |
+| Ubuntu 22.04+ | `apt install age` |
+| Void Linux | `xbps-install age` |
+| FreeBSD | `pkg install age` (security/age) |
+| OpenBSD 6.7+ | `pkg_add age` (security/age) |
+| Chocolatey (Windows) | `choco install age.portable` |
+| Scoop (Windows) | `scoop bucket add extras && scoop install age` |
+| pkgx | `pkgx install age` |
+ +On Windows, Linux, macOS, and FreeBSD you can use the pre-built binaries. + +``` +https://dl.filippo.io/age/latest?for=linux/amd64 +https://dl.filippo.io/age/v1.1.1?for=darwin/arm64 +... +``` + +If your system has [a supported version of Go](https://go.dev/dl/), you can build from source. + +``` +go install filippo.io/age/cmd/...@latest +``` + +Help from new packagers is very welcome. + +## Usage + +For the full documentation, read [the age(1) man page](https://filippo.io/age/age.1). + +``` +Usage: + age [--encrypt] (-r RECIPIENT | -R PATH)... [--armor] [-o OUTPUT] [INPUT] + age [--encrypt] --passphrase [--armor] [-o OUTPUT] [INPUT] + age --decrypt [-i PATH]... [-o OUTPUT] [INPUT] + +Options: + -e, --encrypt Encrypt the input to the output. Default if omitted. + -d, --decrypt Decrypt the input to the output. + -o, --output OUTPUT Write the result to the file at path OUTPUT. + -a, --armor Encrypt to a PEM encoded format. + -p, --passphrase Encrypt with a passphrase. + -r, --recipient RECIPIENT Encrypt to the specified RECIPIENT. Can be repeated. + -R, --recipients-file PATH Encrypt to recipients listed at PATH. Can be repeated. + -i, --identity PATH Use the identity file at PATH. Can be repeated. + +INPUT defaults to standard input, and OUTPUT defaults to standard output. +If OUTPUT exists, it will be overwritten. + +RECIPIENT can be an age public key generated by age-keygen ("age1...") +or an SSH public key ("ssh-ed25519 AAAA...", "ssh-rsa AAAA..."). + +Recipient files contain one or more recipients, one per line. Empty lines +and lines starting with "#" are ignored as comments. "-" may be used to +read recipients from standard input. + +Identity files contain one or more secret keys ("AGE-SECRET-KEY-1..."), +one per line, or an SSH key. Empty lines and lines starting with "#" are +ignored as comments. Passphrase encrypted age files can be used as +identity files. Multiple key files can be provided, and any unused ones +will be ignored. "-" may be used to read identities from standard input. + +When --encrypt is specified explicitly, -i can also be used to encrypt to an +identity file symmetrically, instead or in addition to normal recipients. +``` + +### Multiple recipients + +Files can be encrypted to multiple recipients by repeating `-r/--recipient`. Every recipient will be able to decrypt the file. + +``` +$ age -o example.jpg.age -r age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p \ + -r age1lggyhqrw2nlhcxprm67z43rta597azn8gknawjehu9d9dl0jq3yqqvfafg example.jpg +``` + +#### Recipient files + +Multiple recipients can also be listed one per line in one or more files passed with the `-R/--recipients-file` flag. + +``` +$ cat recipients.txt +# Alice +age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p +# Bob +age1lggyhqrw2nlhcxprm67z43rta597azn8gknawjehu9d9dl0jq3yqqvfafg +$ age -R recipients.txt example.jpg > example.jpg.age +``` + +If the argument to `-R` (or `-i`) is `-`, the file is read from standard input. + +### Passphrases + +Files can be encrypted with a passphrase by using `-p/--passphrase`. By default age will automatically generate a secure passphrase. Passphrase protected files are automatically detected at decrypt time. + +``` +$ age -p secrets.txt > secrets.txt.age +Enter passphrase (leave empty to autogenerate a secure one): +Using the autogenerated passphrase "release-response-step-brand-wrap-ankle-pair-unusual-sword-train". 
+$ age -d secrets.txt.age > secrets.txt +Enter passphrase: +``` + +### Passphrase-protected key files + +If an identity file passed to `-i` is a passphrase encrypted age file, it will be automatically decrypted. + +``` +$ age-keygen | age -p > key.age +Public key: age1yhm4gctwfmrpz87tdslm550wrx6m79y9f2hdzt0lndjnehwj0ukqrjpyx5 +Enter passphrase (leave empty to autogenerate a secure one): +Using the autogenerated passphrase "hip-roast-boring-snake-mention-east-wasp-honey-input-actress". +$ age -r age1yhm4gctwfmrpz87tdslm550wrx6m79y9f2hdzt0lndjnehwj0ukqrjpyx5 secrets.txt > secrets.txt.age +$ age -d -i key.age secrets.txt.age > secrets.txt +Enter passphrase for identity file "key.age": +``` + +Passphrase-protected identity files are not necessary for most use cases, where access to the encrypted identity file implies access to the whole system. However, they can be useful if the identity file is stored remotely. + +### SSH keys + +As a convenience feature, age also supports encrypting to `ssh-rsa` and `ssh-ed25519` SSH public keys, and decrypting with the respective private key file. (`ssh-agent` is not supported.) + +``` +$ age -R ~/.ssh/id_ed25519.pub example.jpg > example.jpg.age +$ age -d -i ~/.ssh/id_ed25519 example.jpg.age > example.jpg +``` + +Note that SSH key support employs more complex cryptography, and embeds a public key tag in the encrypted file, making it possible to track files that are encrypted to a specific public key. + +#### Encrypting to a GitHub user + +Combining SSH key support and `-R`, you can easily encrypt a file to the SSH keys listed on a GitHub profile. + +``` +$ curl https://github.com/benjojo.keys | age -R - example.jpg > example.jpg.age +``` + +Keep in mind that people might not protect SSH keys long-term, since they are revokable when used only for authentication, and that SSH keys held on YubiKeys can't be used to decrypt files. diff --git a/vendor/filippo.io/age/age.go b/vendor/filippo.io/age/age.go new file mode 100644 index 000000000..c9b17bc82 --- /dev/null +++ b/vendor/filippo.io/age/age.go @@ -0,0 +1,271 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package age implements file encryption according to the age-encryption.org/v1 +// specification. +// +// For most use cases, use the Encrypt and Decrypt functions with +// X25519Recipient and X25519Identity. If passphrase encryption is required, use +// ScryptRecipient and ScryptIdentity. For compatibility with existing SSH keys +// use the filippo.io/age/agessh package. +// +// age encrypted files are binary and not malleable. For encoding them as text, +// use the filippo.io/age/armor package. +// +// # Key management +// +// age does not have a global keyring. Instead, since age keys are small, +// textual, and cheap, you are encouraged to generate dedicated keys for each +// task and application. +// +// Recipient public keys can be passed around as command line flags and in +// config files, while secret keys should be stored in dedicated files, through +// secret management systems, or as environment variables. +// +// There is no default path for age keys. Instead, they should be stored at +// application-specific paths. The CLI supports files where private keys are +// listed one per line, ignoring empty lines and lines starting with "#". These +// files can be parsed with ParseIdentities. 
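+//
+// A minimal sketch of loading identities from such a file (the path is a
+// hypothetical example, error handling elided):
+//
+//	f, _ := os.Open("/etc/myapp/age-identities.txt")
+//	ids, _ := age.ParseIdentities(f)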
+// +// When integrating age into a new system, it's recommended that you only +// support X25519 keys, and not SSH keys. The latter are supported for manual +// encryption operations. If you need to tie into existing key management +// infrastructure, you might want to consider implementing your own Recipient +// and Identity. +// +// # Backwards compatibility +// +// Files encrypted with a stable version (not alpha, beta, or release candidate) +// of age, or with any v1.0.0 beta or release candidate, will decrypt with any +// later versions of the v1 API. This might change in v2, in which case v1 will +// be maintained with security fixes for compatibility with older files. +// +// If decrypting an older file poses a security risk, doing so might require an +// explicit opt-in in the API. +package age + +import ( + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + "io" + "sort" + + "filippo.io/age/internal/format" + "filippo.io/age/internal/stream" +) + +// An Identity is passed to Decrypt to unwrap an opaque file key from a +// recipient stanza. It can be for example a secret key like X25519Identity, a +// plugin, or a custom implementation. +// +// Unwrap must return an error wrapping ErrIncorrectIdentity if none of the +// recipient stanzas match the identity, any other error will be considered +// fatal. +// +// Most age API users won't need to interact with this directly, and should +// instead pass Recipient implementations to Encrypt and Identity +// implementations to Decrypt. +type Identity interface { + Unwrap(stanzas []*Stanza) (fileKey []byte, err error) +} + +var ErrIncorrectIdentity = errors.New("incorrect identity for recipient block") + +// A Recipient is passed to Encrypt to wrap an opaque file key to one or more +// recipient stanza(s). It can be for example a public key like X25519Recipient, +// a plugin, or a custom implementation. +// +// Most age API users won't need to interact with this directly, and should +// instead pass Recipient implementations to Encrypt and Identity +// implementations to Decrypt. +type Recipient interface { + Wrap(fileKey []byte) ([]*Stanza, error) +} + +// RecipientWithLabels can be optionally implemented by a Recipient, in which +// case Encrypt will use WrapWithLabels instead of Wrap. +// +// Encrypt will succeed only if the labels returned by all the recipients +// (assuming the empty set for those that don't implement RecipientWithLabels) +// are the same. +// +// This can be used to ensure a recipient is only used with other recipients +// with equivalent properties (for example by setting a "postquantum" label) or +// to ensure a recipient is always used alone (by returning a random label, for +// example to preserve its authentication properties). +type RecipientWithLabels interface { + WrapWithLabels(fileKey []byte) (s []*Stanza, labels []string, err error) +} + +// A Stanza is a section of the age header that encapsulates the file key as +// encrypted to a specific recipient. +// +// Most age API users won't need to interact with this directly, and should +// instead pass Recipient implementations to Encrypt and Identity +// implementations to Decrypt. +type Stanza struct { + Type string + Args []string + Body []byte +} + +const fileKeySize = 16 +const streamNonceSize = 16 + +// Encrypt encrypts a file to one or more recipients. +// +// Writes to the returned WriteCloser are encrypted and written to dst as an age +// file. Every recipient will be able to decrypt the file. 
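+//
+// A minimal sketch of typical use, where dst is any io.Writer and the
+// recipient string is a hypothetical placeholder (error handling elided):
+//
+//	recipient, _ := age.ParseX25519Recipient("age1...")
+//	w, _ := age.Encrypt(dst, recipient)
+//	io.WriteString(w, "plaintext")
+//	w.Close()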
+// +// The caller must call Close on the WriteCloser when done for the last chunk to +// be encrypted and flushed to dst. +func Encrypt(dst io.Writer, recipients ...Recipient) (io.WriteCloser, error) { + if len(recipients) == 0 { + return nil, errors.New("no recipients specified") + } + + fileKey := make([]byte, fileKeySize) + if _, err := rand.Read(fileKey); err != nil { + return nil, err + } + + hdr := &format.Header{} + var labels []string + for i, r := range recipients { + stanzas, l, err := wrapWithLabels(r, fileKey) + if err != nil { + return nil, fmt.Errorf("failed to wrap key for recipient #%d: %v", i, err) + } + sort.Strings(l) + if i == 0 { + labels = l + } else if !slicesEqual(labels, l) { + return nil, fmt.Errorf("incompatible recipients") + } + for _, s := range stanzas { + hdr.Recipients = append(hdr.Recipients, (*format.Stanza)(s)) + } + } + if mac, err := headerMAC(fileKey, hdr); err != nil { + return nil, fmt.Errorf("failed to compute header MAC: %v", err) + } else { + hdr.MAC = mac + } + if err := hdr.Marshal(dst); err != nil { + return nil, fmt.Errorf("failed to write header: %v", err) + } + + nonce := make([]byte, streamNonceSize) + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + if _, err := dst.Write(nonce); err != nil { + return nil, fmt.Errorf("failed to write nonce: %v", err) + } + + return stream.NewWriter(streamKey(fileKey, nonce), dst) +} + +func wrapWithLabels(r Recipient, fileKey []byte) (s []*Stanza, labels []string, err error) { + if r, ok := r.(RecipientWithLabels); ok { + return r.WrapWithLabels(fileKey) + } + s, err = r.Wrap(fileKey) + return +} + +func slicesEqual(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// NoIdentityMatchError is returned by Decrypt when none of the supplied +// identities match the encrypted file. +type NoIdentityMatchError struct { + // Errors is a slice of all the errors returned to Decrypt by the Unwrap + // calls it made. They all wrap ErrIncorrectIdentity. + Errors []error +} + +func (*NoIdentityMatchError) Error() string { + return "no identity matched any of the recipients" +} + +// Decrypt decrypts a file encrypted to one or more identities. +// +// It returns a Reader reading the decrypted plaintext of the age file read +// from src. All identities will be tried until one successfully decrypts the file. 
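+//
+// A minimal sketch of typical use, where src is any io.Reader holding an age
+// file and the identity string is a hypothetical placeholder (error handling
+// elided):
+//
+//	id, _ := age.ParseX25519Identity("AGE-SECRET-KEY-1...")
+//	out, _ := age.Decrypt(src, id)
+//	plaintext, _ := io.ReadAll(out)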
+func Decrypt(src io.Reader, identities ...Identity) (io.Reader, error) { + if len(identities) == 0 { + return nil, errors.New("no identities specified") + } + + hdr, payload, err := format.Parse(src) + if err != nil { + return nil, fmt.Errorf("failed to read header: %w", err) + } + + stanzas := make([]*Stanza, 0, len(hdr.Recipients)) + for _, s := range hdr.Recipients { + stanzas = append(stanzas, (*Stanza)(s)) + } + errNoMatch := &NoIdentityMatchError{} + var fileKey []byte + for _, id := range identities { + fileKey, err = id.Unwrap(stanzas) + if errors.Is(err, ErrIncorrectIdentity) { + errNoMatch.Errors = append(errNoMatch.Errors, err) + continue + } + if err != nil { + return nil, err + } + + break + } + if fileKey == nil { + return nil, errNoMatch + } + + if mac, err := headerMAC(fileKey, hdr); err != nil { + return nil, fmt.Errorf("failed to compute header MAC: %v", err) + } else if !hmac.Equal(mac, hdr.MAC) { + return nil, errors.New("bad header MAC") + } + + nonce := make([]byte, streamNonceSize) + if _, err := io.ReadFull(payload, nonce); err != nil { + return nil, fmt.Errorf("failed to read nonce: %w", err) + } + + return stream.NewReader(streamKey(fileKey, nonce), payload) +} + +// multiUnwrap is a helper that implements Identity.Unwrap in terms of a +// function that unwraps a single recipient stanza. +func multiUnwrap(unwrap func(*Stanza) ([]byte, error), stanzas []*Stanza) ([]byte, error) { + for _, s := range stanzas { + fileKey, err := unwrap(s) + if errors.Is(err, ErrIncorrectIdentity) { + // If we ever start returning something interesting wrapping + // ErrIncorrectIdentity, we should let it make its way up through + // Decrypt into NoIdentityMatchError.Errors. + continue + } + if err != nil { + return nil, err + } + return fileKey, nil + } + return nil, ErrIncorrectIdentity +} diff --git a/vendor/filippo.io/age/armor/armor.go b/vendor/filippo.io/age/armor/armor.go new file mode 100644 index 000000000..ac6397e34 --- /dev/null +++ b/vendor/filippo.io/age/armor/armor.go @@ -0,0 +1,187 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor provides a strict, streaming implementation of the ASCII +// armoring format for age files. +// +// It's PEM with type "AGE ENCRYPTED FILE", 64 character columns, no headers, +// and strict base64 decoding. +package armor + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + + "filippo.io/age/internal/format" +) + +const ( + Header = "-----BEGIN AGE ENCRYPTED FILE-----" + Footer = "-----END AGE ENCRYPTED FILE-----" +) + +type armoredWriter struct { + started, closed bool + encoder *format.WrappedBase64Encoder + dst io.Writer +} + +func (a *armoredWriter) Write(p []byte) (int, error) { + if !a.started { + if _, err := io.WriteString(a.dst, Header+"\n"); err != nil { + return 0, err + } + } + a.started = true + return a.encoder.Write(p) +} + +func (a *armoredWriter) Close() error { + if a.closed { + return errors.New("ArmoredWriter already closed") + } + a.closed = true + if err := a.encoder.Close(); err != nil { + return err + } + footer := Footer + "\n" + if !a.encoder.LastLineIsEmpty() { + footer = "\n" + footer + } + _, err := io.WriteString(a.dst, footer) + return err +} + +func NewWriter(dst io.Writer) io.WriteCloser { + // TODO: write a test with aligned and misaligned sizes, and 8 and 10 steps. 
+ return &armoredWriter{ + dst: dst, + encoder: format.NewWrappedBase64Encoder(base64.StdEncoding, dst), + } +} + +type armoredReader struct { + r *bufio.Reader + started bool + unread []byte // backed by buf + buf [format.BytesPerLine]byte + err error +} + +func NewReader(r io.Reader) io.Reader { + return &armoredReader{r: bufio.NewReader(r)} +} + +func (r *armoredReader) Read(p []byte) (int, error) { + if len(r.unread) > 0 { + n := copy(p, r.unread) + r.unread = r.unread[n:] + return n, nil + } + if r.err != nil { + return 0, r.err + } + + getLine := func() ([]byte, error) { + line, err := r.r.ReadBytes('\n') + if err == io.EOF && len(line) == 0 { + return nil, io.ErrUnexpectedEOF + } else if err != nil && err != io.EOF { + return nil, err + } + line = bytes.TrimSuffix(line, []byte("\n")) + line = bytes.TrimSuffix(line, []byte("\r")) + return line, nil + } + + const maxWhitespace = 1024 + drainTrailing := func() error { + buf, err := io.ReadAll(io.LimitReader(r.r, maxWhitespace)) + if err != nil { + return err + } + if len(bytes.TrimSpace(buf)) != 0 { + return errors.New("trailing data after armored file") + } + if len(buf) == maxWhitespace { + return errors.New("too much trailing whitespace") + } + return io.EOF + } + + var removedWhitespace int + for !r.started { + line, err := getLine() + if err != nil { + return 0, r.setErr(err) + } + // Ignore leading whitespace. + if len(bytes.TrimSpace(line)) == 0 { + removedWhitespace += len(line) + 1 + if removedWhitespace > maxWhitespace { + return 0, r.setErr(errors.New("too much leading whitespace")) + } + continue + } + if string(line) != Header { + return 0, r.setErr(fmt.Errorf("invalid first line: %q", line)) + } + r.started = true + } + line, err := getLine() + if err != nil { + return 0, r.setErr(err) + } + if string(line) == Footer { + return 0, r.setErr(drainTrailing()) + } + if len(line) > format.ColumnsPerLine { + return 0, r.setErr(errors.New("column limit exceeded")) + } + r.unread = r.buf[:] + n, err := base64.StdEncoding.Strict().Decode(r.unread, line) + if err != nil { + return 0, r.setErr(err) + } + r.unread = r.unread[:n] + + if n < format.BytesPerLine { + line, err := getLine() + if err != nil { + return 0, r.setErr(err) + } + if string(line) != Footer { + return 0, r.setErr(fmt.Errorf("invalid closing line: %q", line)) + } + r.setErr(drainTrailing()) + } + + nn := copy(p, r.unread) + r.unread = r.unread[nn:] + return nn, nil +} + +type Error struct { + err error +} + +func (e *Error) Error() string { + return "invalid armor: " + e.err.Error() +} + +func (e *Error) Unwrap() error { + return e.err +} + +func (r *armoredReader) setErr(err error) error { + if err != io.EOF { + err = &Error{err} + } + r.err = err + return err +} diff --git a/vendor/filippo.io/age/internal/bech32/bech32.go b/vendor/filippo.io/age/internal/bech32/bech32.go new file mode 100644 index 000000000..6f8a48cd6 --- /dev/null +++ b/vendor/filippo.io/age/internal/bech32/bech32.go @@ -0,0 +1,173 @@ +// Copyright (c) 2017 Takatoshi Nakagawa +// Copyright (c) 2019 The age Authors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright 
notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bech32 is a modified version of the reference implementation of BIP173.
+package bech32
+
+import (
+	"fmt"
+	"strings"
+)
+
+var charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
+
+var generator = []uint32{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
+
+func polymod(values []byte) uint32 {
+	chk := uint32(1)
+	for _, v := range values {
+		top := chk >> 25
+		chk = (chk & 0x1ffffff) << 5
+		chk = chk ^ uint32(v)
+		for i := 0; i < 5; i++ {
+			bit := top >> i & 1
+			if bit == 1 {
+				chk ^= generator[i]
+			}
+		}
+	}
+	return chk
+}
+
+func hrpExpand(hrp string) []byte {
+	h := []byte(strings.ToLower(hrp))
+	var ret []byte
+	for _, c := range h {
+		ret = append(ret, c>>5)
+	}
+	ret = append(ret, 0)
+	for _, c := range h {
+		ret = append(ret, c&31)
+	}
+	return ret
+}
+
+func verifyChecksum(hrp string, data []byte) bool {
+	return polymod(append(hrpExpand(hrp), data...)) == 1
+}
+
+func createChecksum(hrp string, data []byte) []byte {
+	values := append(hrpExpand(hrp), data...)
+	values = append(values, []byte{0, 0, 0, 0, 0, 0}...)
+	mod := polymod(values) ^ 1
+	ret := make([]byte, 6)
+	for p := range ret {
+		shift := 5 * (5 - p)
+		ret[p] = byte(mod>>shift) & 31
+	}
+	return ret
+}
+
+func convertBits(data []byte, frombits, tobits byte, pad bool) ([]byte, error) {
+	var ret []byte
+	acc := uint32(0)
+	bits := byte(0)
+	maxv := byte(1<<tobits - 1)
+	for idx, value := range data {
+		if value>>frombits != 0 {
+			return nil, fmt.Errorf("invalid data range: data[%d]=%d (frombits=%d)", idx, value, frombits)
+		}
+		acc = acc<<frombits | uint32(value)
+		bits += frombits
+		for bits >= tobits {
+			bits -= tobits
+			ret = append(ret, byte(acc>>bits)&maxv)
+		}
+	}
+	if pad {
+		if bits > 0 {
+			ret = append(ret, byte(acc<<(tobits-bits))&maxv)
+		}
+	} else if bits >= frombits {
+		return nil, fmt.Errorf("illegal zero padding")
+	} else if byte(acc<<(tobits-bits))&maxv != 0 {
+		return nil, fmt.Errorf("non-zero padding")
+	}
+	return ret, nil
+}
+
+// Encode encodes the HRP and a bytes slice to Bech32. If the HRP is uppercase,
+// the output will be uppercase.
+func Encode(hrp string, data []byte) (string, error) {
+	values, err := convertBits(data, 8, 5, true)
+	if err != nil {
+		return "", err
+	}
+	if len(hrp) < 1 {
+		return "", fmt.Errorf("invalid HRP: %q", hrp)
+	}
+	for p, c := range hrp {
+		if c < 33 || c > 126 {
+			return "", fmt.Errorf("invalid HRP character: hrp[%d]=%d", p, c)
+		}
+	}
+	if strings.ToUpper(hrp) != hrp && strings.ToLower(hrp) != hrp {
+		return "", fmt.Errorf("mixed case HRP: %q", hrp)
+	}
+	lower := strings.ToLower(hrp) == hrp
+	hrp = strings.ToLower(hrp)
+	var ret strings.Builder
+	ret.WriteString(hrp)
+	ret.WriteString("1")
+	for _, p := range values {
+		ret.WriteByte(charset[p])
+	}
+	for _, p := range createChecksum(hrp, values) {
+		ret.WriteByte(charset[p])
+	}
+	if lower {
+		return ret.String(), nil
+	}
+	return strings.ToUpper(ret.String()), nil
+}
+
+// Decode decodes a Bech32 string. If the string is uppercase, the HRP will be uppercase.
+func Decode(s string) (hrp string, data []byte, err error) { + if strings.ToLower(s) != s && strings.ToUpper(s) != s { + return "", nil, fmt.Errorf("mixed case") + } + pos := strings.LastIndex(s, "1") + if pos < 1 || pos+7 > len(s) { + return "", nil, fmt.Errorf("separator '1' at invalid position: pos=%d, len=%d", pos, len(s)) + } + hrp = s[:pos] + for p, c := range hrp { + if c < 33 || c > 126 { + return "", nil, fmt.Errorf("invalid character human-readable part: s[%d]=%d", p, c) + } + } + s = strings.ToLower(s) + for p, c := range s[pos+1:] { + d := strings.IndexRune(charset, c) + if d == -1 { + return "", nil, fmt.Errorf("invalid character data part: s[%d]=%v", p, c) + } + data = append(data, byte(d)) + } + if !verifyChecksum(hrp, data) { + return "", nil, fmt.Errorf("invalid checksum") + } + data, err = convertBits(data[:len(data)-6], 5, 8, false) + if err != nil { + return "", nil, err + } + return hrp, data, nil +} diff --git a/vendor/filippo.io/age/internal/format/format.go b/vendor/filippo.io/age/internal/format/format.go new file mode 100644 index 000000000..aa77b756f --- /dev/null +++ b/vendor/filippo.io/age/internal/format/format.go @@ -0,0 +1,311 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format implements the age file format. +package format + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "strings" +) + +type Header struct { + Recipients []*Stanza + MAC []byte +} + +// Stanza is assignable to age.Stanza, and if this package is made public, +// age.Stanza can be made a type alias of this type. +type Stanza struct { + Type string + Args []string + Body []byte +} + +var b64 = base64.RawStdEncoding.Strict() + +func DecodeString(s string) ([]byte, error) { + // CR and LF are ignored by DecodeString, but we don't want any malleability. + if strings.ContainsAny(s, "\n\r") { + return nil, errors.New(`unexpected newline character`) + } + return b64.DecodeString(s) +} + +var EncodeToString = b64.EncodeToString + +const ColumnsPerLine = 64 + +const BytesPerLine = ColumnsPerLine / 4 * 3 + +// NewWrappedBase64Encoder returns a WrappedBase64Encoder that writes to dst. +func NewWrappedBase64Encoder(enc *base64.Encoding, dst io.Writer) *WrappedBase64Encoder { + w := &WrappedBase64Encoder{dst: dst} + w.enc = base64.NewEncoder(enc, WriterFunc(w.writeWrapped)) + return w +} + +type WriterFunc func(p []byte) (int, error) + +func (f WriterFunc) Write(p []byte) (int, error) { return f(p) } + +// WrappedBase64Encoder is a standard base64 encoder that inserts an LF +// character every ColumnsPerLine bytes. It does not insert a newline neither at +// the beginning nor at the end of the stream, but it ensures the last line is +// shorter than ColumnsPerLine, which means it might be empty. 
+type WrappedBase64Encoder struct { + enc io.WriteCloser + dst io.Writer + written int + buf bytes.Buffer +} + +func (w *WrappedBase64Encoder) Write(p []byte) (int, error) { return w.enc.Write(p) } + +func (w *WrappedBase64Encoder) Close() error { + return w.enc.Close() +} + +func (w *WrappedBase64Encoder) writeWrapped(p []byte) (int, error) { + if w.buf.Len() != 0 { + panic("age: internal error: non-empty WrappedBase64Encoder.buf") + } + for len(p) > 0 { + toWrite := ColumnsPerLine - (w.written % ColumnsPerLine) + if toWrite > len(p) { + toWrite = len(p) + } + n, _ := w.buf.Write(p[:toWrite]) + w.written += n + p = p[n:] + if w.written%ColumnsPerLine == 0 { + w.buf.Write([]byte("\n")) + } + } + if _, err := w.buf.WriteTo(w.dst); err != nil { + // We always return n = 0 on error because it's hard to work back to the + // input length that ended up written out. Not ideal, but Write errors + // are not recoverable anyway. + return 0, err + } + return len(p), nil +} + +// LastLineIsEmpty returns whether the last output line was empty, either +// because no input was written, or because a multiple of BytesPerLine was. +// +// Calling LastLineIsEmpty before Close is meaningless. +func (w *WrappedBase64Encoder) LastLineIsEmpty() bool { + return w.written%ColumnsPerLine == 0 +} + +const intro = "age-encryption.org/v1\n" + +var stanzaPrefix = []byte("->") +var footerPrefix = []byte("---") + +func (r *Stanza) Marshal(w io.Writer) error { + if _, err := w.Write(stanzaPrefix); err != nil { + return err + } + for _, a := range append([]string{r.Type}, r.Args...) { + if _, err := io.WriteString(w, " "+a); err != nil { + return err + } + } + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + ww := NewWrappedBase64Encoder(b64, w) + if _, err := ww.Write(r.Body); err != nil { + return err + } + if err := ww.Close(); err != nil { + return err + } + _, err := io.WriteString(w, "\n") + return err +} + +func (h *Header) MarshalWithoutMAC(w io.Writer) error { + if _, err := io.WriteString(w, intro); err != nil { + return err + } + for _, r := range h.Recipients { + if err := r.Marshal(w); err != nil { + return err + } + } + _, err := fmt.Fprintf(w, "%s", footerPrefix) + return err +} + +func (h *Header) Marshal(w io.Writer) error { + if err := h.MarshalWithoutMAC(w); err != nil { + return err + } + mac := b64.EncodeToString(h.MAC) + _, err := fmt.Fprintf(w, " %s\n", mac) + return err +} + +type StanzaReader struct { + r *bufio.Reader + err error +} + +func NewStanzaReader(r *bufio.Reader) *StanzaReader { + return &StanzaReader{r: r} +} + +func (r *StanzaReader) ReadStanza() (s *Stanza, err error) { + // Read errors are unrecoverable. 
+ if r.err != nil { + return nil, r.err + } + defer func() { r.err = err }() + + s = &Stanza{} + + line, err := r.r.ReadBytes('\n') + if err != nil { + return nil, fmt.Errorf("failed to read line: %w", err) + } + if !bytes.HasPrefix(line, stanzaPrefix) { + return nil, fmt.Errorf("malformed stanza opening line: %q", line) + } + prefix, args := splitArgs(line) + if prefix != string(stanzaPrefix) || len(args) < 1 { + return nil, fmt.Errorf("malformed stanza: %q", line) + } + for _, a := range args { + if !isValidString(a) { + return nil, fmt.Errorf("malformed stanza: %q", line) + } + } + s.Type = args[0] + s.Args = args[1:] + + for { + line, err := r.r.ReadBytes('\n') + if err != nil { + return nil, fmt.Errorf("failed to read line: %w", err) + } + + b, err := DecodeString(strings.TrimSuffix(string(line), "\n")) + if err != nil { + if bytes.HasPrefix(line, footerPrefix) || bytes.HasPrefix(line, stanzaPrefix) { + return nil, fmt.Errorf("malformed body line %q: stanza ended without a short line\nNote: this might be a file encrypted with an old beta version of age or rage. Use age v1.0.0-beta6 or rage to decrypt it.", line) + } + return nil, errorf("malformed body line %q: %v", line, err) + } + if len(b) > BytesPerLine { + return nil, errorf("malformed body line %q: too long", line) + } + s.Body = append(s.Body, b...) + if len(b) < BytesPerLine { + // A stanza body always ends with a short line. + return s, nil + } + } +} + +type ParseError struct { + err error +} + +func (e *ParseError) Error() string { + return "parsing age header: " + e.err.Error() +} + +func (e *ParseError) Unwrap() error { + return e.err +} + +func errorf(format string, a ...interface{}) error { + return &ParseError{fmt.Errorf(format, a...)} +} + +// Parse returns the header and a Reader that begins at the start of the +// payload. +func Parse(input io.Reader) (*Header, io.Reader, error) { + h := &Header{} + rr := bufio.NewReader(input) + + line, err := rr.ReadString('\n') + if err != nil { + return nil, nil, errorf("failed to read intro: %w", err) + } + if line != intro { + return nil, nil, errorf("unexpected intro: %q", line) + } + + sr := NewStanzaReader(rr) + for { + peek, err := rr.Peek(len(footerPrefix)) + if err != nil { + return nil, nil, errorf("failed to read header: %w", err) + } + + if bytes.Equal(peek, footerPrefix) { + line, err := rr.ReadBytes('\n') + if err != nil { + return nil, nil, fmt.Errorf("failed to read header: %w", err) + } + + prefix, args := splitArgs(line) + if prefix != string(footerPrefix) || len(args) != 1 { + return nil, nil, errorf("malformed closing line: %q", line) + } + h.MAC, err = DecodeString(args[0]) + if err != nil || len(h.MAC) != 32 { + return nil, nil, errorf("malformed closing line %q: %v", line, err) + } + break + } + + s, err := sr.ReadStanza() + if err != nil { + return nil, nil, fmt.Errorf("failed to parse header: %w", err) + } + h.Recipients = append(h.Recipients, s) + } + + // If input is a bufio.Reader, rr might be equal to input because + // bufio.NewReader short-circuits. In this case we can just return it (and + // we would end up reading the buffer twice if we prepended the peek below). + if rr == input { + return h, rr, nil + } + // Otherwise, unwind the bufio overread and return the unbuffered input. 
+ buf, err := rr.Peek(rr.Buffered()) + if err != nil { + return nil, nil, errorf("internal error: %v", err) + } + payload := io.MultiReader(bytes.NewReader(buf), input) + return h, payload, nil +} + +func splitArgs(line []byte) (string, []string) { + l := strings.TrimSuffix(string(line), "\n") + parts := strings.Split(l, " ") + return parts[0], parts[1:] +} + +func isValidString(s string) bool { + if len(s) == 0 { + return false + } + for _, c := range s { + if c < 33 || c > 126 { + return false + } + } + return true +} diff --git a/vendor/filippo.io/age/internal/stream/stream.go b/vendor/filippo.io/age/internal/stream/stream.go new file mode 100644 index 000000000..7cf02c470 --- /dev/null +++ b/vendor/filippo.io/age/internal/stream/stream.go @@ -0,0 +1,231 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stream implements a variant of the STREAM chunked encryption scheme. +package stream + +import ( + "crypto/cipher" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/poly1305" +) + +const ChunkSize = 64 * 1024 + +type Reader struct { + a cipher.AEAD + src io.Reader + + unread []byte // decrypted but unread data, backed by buf + buf [encChunkSize]byte + + err error + nonce [chacha20poly1305.NonceSize]byte +} + +const ( + encChunkSize = ChunkSize + poly1305.TagSize + lastChunkFlag = 0x01 +) + +func NewReader(key []byte, src io.Reader) (*Reader, error) { + aead, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + return &Reader{ + a: aead, + src: src, + }, nil +} + +func (r *Reader) Read(p []byte) (int, error) { + if len(r.unread) > 0 { + n := copy(p, r.unread) + r.unread = r.unread[n:] + return n, nil + } + if r.err != nil { + return 0, r.err + } + if len(p) == 0 { + return 0, nil + } + + last, err := r.readChunk() + if err != nil { + r.err = err + return 0, err + } + + n := copy(p, r.unread) + r.unread = r.unread[n:] + + if last { + // Ensure there is an EOF after the last chunk as expected. In other + // words, check for trailing data after a full-length final chunk. + // Hopefully, the underlying reader supports returning EOF even if it + // had previously returned an EOF to ReadFull. + if _, err := r.src.Read(make([]byte, 1)); err == nil { + r.err = errors.New("trailing data after end of encrypted file") + } else if err != io.EOF { + r.err = fmt.Errorf("non-EOF error reading after end of encrypted file: %w", err) + } else { + r.err = io.EOF + } + } + + return n, nil +} + +// readChunk reads the next chunk of ciphertext from r.src and makes it available +// in r.unread. last is true if the chunk was marked as the end of the message. +// readChunk must not be called again after returning a last chunk or an error. +func (r *Reader) readChunk() (last bool, err error) { + if len(r.unread) != 0 { + panic("stream: internal error: readChunk called with dirty buffer") + } + + in := r.buf[:] + n, err := io.ReadFull(r.src, in) + switch { + case err == io.EOF: + // A message can't end without a marked chunk. This message is truncated. + return false, io.ErrUnexpectedEOF + case err == io.ErrUnexpectedEOF: + // The last chunk can be short, but not empty unless it's the first and + // only chunk. 
+ if !nonceIsZero(&r.nonce) && n == r.a.Overhead() { + return false, errors.New("last chunk is empty, try age v1.0.0, and please consider reporting this") + } + in = in[:n] + last = true + setLastChunkFlag(&r.nonce) + case err != nil: + return false, err + } + + outBuf := make([]byte, 0, ChunkSize) + out, err := r.a.Open(outBuf, r.nonce[:], in, nil) + if err != nil && !last { + // Check if this was a full-length final chunk. + last = true + setLastChunkFlag(&r.nonce) + out, err = r.a.Open(outBuf, r.nonce[:], in, nil) + } + if err != nil { + return false, errors.New("failed to decrypt and authenticate payload chunk") + } + + incNonce(&r.nonce) + r.unread = r.buf[:copy(r.buf[:], out)] + return last, nil +} + +func incNonce(nonce *[chacha20poly1305.NonceSize]byte) { + for i := len(nonce) - 2; i >= 0; i-- { + nonce[i]++ + if nonce[i] != 0 { + break + } else if i == 0 { + // The counter is 88 bits, this is unreachable. + panic("stream: chunk counter wrapped around") + } + } +} + +func setLastChunkFlag(nonce *[chacha20poly1305.NonceSize]byte) { + nonce[len(nonce)-1] = lastChunkFlag +} + +func nonceIsZero(nonce *[chacha20poly1305.NonceSize]byte) bool { + return *nonce == [chacha20poly1305.NonceSize]byte{} +} + +type Writer struct { + a cipher.AEAD + dst io.Writer + unwritten []byte // backed by buf + buf [encChunkSize]byte + nonce [chacha20poly1305.NonceSize]byte + err error +} + +func NewWriter(key []byte, dst io.Writer) (*Writer, error) { + aead, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + w := &Writer{ + a: aead, + dst: dst, + } + w.unwritten = w.buf[:0] + return w, nil +} + +func (w *Writer) Write(p []byte) (n int, err error) { + // TODO: consider refactoring with a bytes.Buffer. + if w.err != nil { + return 0, w.err + } + if len(p) == 0 { + return 0, nil + } + + total := len(p) + for len(p) > 0 { + freeBuf := w.buf[len(w.unwritten):ChunkSize] + n := copy(freeBuf, p) + p = p[n:] + w.unwritten = w.unwritten[:len(w.unwritten)+n] + + if len(w.unwritten) == ChunkSize && len(p) > 0 { + if err := w.flushChunk(notLastChunk); err != nil { + w.err = err + return 0, err + } + } + } + return total, nil +} + +// Close flushes the last chunk. It does not close the underlying Writer. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + w.err = w.flushChunk(lastChunk) + if w.err != nil { + return w.err + } + + w.err = errors.New("stream.Writer is already closed") + return nil +} + +const ( + lastChunk = true + notLastChunk = false +) + +func (w *Writer) flushChunk(last bool) error { + if !last && len(w.unwritten) != ChunkSize { + panic("stream: internal error: flush called with partial chunk") + } + + if last { + setLastChunkFlag(&w.nonce) + } + buf := w.a.Seal(w.buf[:0], w.nonce[:], w.unwritten, nil) + _, err := w.dst.Write(buf) + w.unwritten = w.buf[:0] + incNonce(&w.nonce) + return err +} diff --git a/vendor/filippo.io/age/parse.go b/vendor/filippo.io/age/parse.go new file mode 100644 index 000000000..373d1a893 --- /dev/null +++ b/vendor/filippo.io/age/parse.go @@ -0,0 +1,84 @@ +// Copyright 2021 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package age + +import ( + "bufio" + "fmt" + "io" + "strings" +) + +// ParseIdentities parses a file with one or more private key encodings, one per +// line. Empty lines and lines starting with "#" are ignored. 
+// +// This is the same syntax as the private key files accepted by the CLI, except +// the CLI also accepts SSH private keys, which are not recommended for the +// average application. +// +// Currently, all returned values are of type *X25519Identity, but different +// types might be returned in the future. +func ParseIdentities(f io.Reader) ([]Identity, error) { + const privateKeySizeLimit = 1 << 24 // 16 MiB + var ids []Identity + scanner := bufio.NewScanner(io.LimitReader(f, privateKeySizeLimit)) + var n int + for scanner.Scan() { + n++ + line := scanner.Text() + if strings.HasPrefix(line, "#") || line == "" { + continue + } + i, err := ParseX25519Identity(line) + if err != nil { + return nil, fmt.Errorf("error at line %d: %v", n, err) + } + ids = append(ids, i) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("failed to read secret keys file: %v", err) + } + if len(ids) == 0 { + return nil, fmt.Errorf("no secret keys found") + } + return ids, nil +} + +// ParseRecipients parses a file with one or more public key encodings, one per +// line. Empty lines and lines starting with "#" are ignored. +// +// This is the same syntax as the recipients files accepted by the CLI, except +// the CLI also accepts SSH recipients, which are not recommended for the +// average application. +// +// Currently, all returned values are of type *X25519Recipient, but different +// types might be returned in the future. +func ParseRecipients(f io.Reader) ([]Recipient, error) { + const recipientFileSizeLimit = 1 << 24 // 16 MiB + var recs []Recipient + scanner := bufio.NewScanner(io.LimitReader(f, recipientFileSizeLimit)) + var n int + for scanner.Scan() { + n++ + line := scanner.Text() + if strings.HasPrefix(line, "#") || line == "" { + continue + } + r, err := ParseX25519Recipient(line) + if err != nil { + // Hide the error since it might unintentionally leak the contents + // of confidential files. + return nil, fmt.Errorf("malformed recipient at line %d", n) + } + recs = append(recs, r) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("failed to read recipients file: %v", err) + } + if len(recs) == 0 { + return nil, fmt.Errorf("no recipients found") + } + return recs, nil +} diff --git a/vendor/filippo.io/age/primitives.go b/vendor/filippo.io/age/primitives.go new file mode 100644 index 000000000..804b0193f --- /dev/null +++ b/vendor/filippo.io/age/primitives.go @@ -0,0 +1,72 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package age + +import ( + "crypto/hmac" + "crypto/sha256" + "errors" + "io" + + "filippo.io/age/internal/format" + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/hkdf" +) + +// aeadEncrypt encrypts a message with a one-time key. +func aeadEncrypt(key, plaintext []byte) ([]byte, error) { + aead, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + // The nonce is fixed because this function is only used in places where the + // spec guarantees each key is only used once (by deriving it from values + // that include fresh randomness), allowing us to save the overhead. + // For the code that encrypts the actual payload, look at the + // filippo.io/age/internal/stream package. 
+ nonce := make([]byte, chacha20poly1305.NonceSize) + return aead.Seal(nil, nonce, plaintext, nil), nil +} + +var errIncorrectCiphertextSize = errors.New("encrypted value has unexpected length") + +// aeadDecrypt decrypts a message of an expected fixed size. +// +// The message size is limited to mitigate multi-key attacks, where a ciphertext +// can be crafted that decrypts successfully under multiple keys. Short +// ciphertexts can only target two keys, which has limited impact. +func aeadDecrypt(key []byte, size int, ciphertext []byte) ([]byte, error) { + aead, err := chacha20poly1305.New(key) + if err != nil { + return nil, err + } + if len(ciphertext) != size+aead.Overhead() { + return nil, errIncorrectCiphertextSize + } + nonce := make([]byte, chacha20poly1305.NonceSize) + return aead.Open(nil, nonce, ciphertext, nil) +} + +func headerMAC(fileKey []byte, hdr *format.Header) ([]byte, error) { + h := hkdf.New(sha256.New, fileKey, nil, []byte("header")) + hmacKey := make([]byte, 32) + if _, err := io.ReadFull(h, hmacKey); err != nil { + return nil, err + } + hh := hmac.New(sha256.New, hmacKey) + if err := hdr.MarshalWithoutMAC(hh); err != nil { + return nil, err + } + return hh.Sum(nil), nil +} + +func streamKey(fileKey, nonce []byte) []byte { + h := hkdf.New(sha256.New, fileKey, nonce, []byte("payload")) + streamKey := make([]byte, chacha20poly1305.KeySize) + if _, err := io.ReadFull(h, streamKey); err != nil { + panic("age: internal error: failed to read from HKDF: " + err.Error()) + } + return streamKey +} diff --git a/vendor/filippo.io/age/scrypt.go b/vendor/filippo.io/age/scrypt.go new file mode 100644 index 000000000..73d13b7fa --- /dev/null +++ b/vendor/filippo.io/age/scrypt.go @@ -0,0 +1,206 @@ +// Copyright 2019 The age Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package age + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "regexp" + "strconv" + + "filippo.io/age/internal/format" + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/scrypt" +) + +const scryptLabel = "age-encryption.org/v1/scrypt" + +// ScryptRecipient is a password-based recipient. Anyone with the password can +// decrypt the message. +// +// If a ScryptRecipient is used, it must be the only recipient for the file: it +// can't be mixed with other recipient types and can't be used multiple times +// for the same file. +// +// Its use is not recommended for automated systems, which should prefer +// X25519Recipient. +type ScryptRecipient struct { + password []byte + workFactor int +} + +var _ Recipient = &ScryptRecipient{} + +// NewScryptRecipient returns a new ScryptRecipient with the provided password. +func NewScryptRecipient(password string) (*ScryptRecipient, error) { + if len(password) == 0 { + return nil, errors.New("passphrase can't be empty") + } + r := &ScryptRecipient{ + password: []byte(password), + // TODO: automatically scale this to 1s (with a min) in the CLI. + workFactor: 18, // 1s on a modern machine + } + return r, nil +} + +// SetWorkFactor sets the scrypt work factor to 2^logN. +// It must be called before Wrap. +// +// If SetWorkFactor is not called, a reasonable default is used. 
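+//
+// For example, a sketch where passphrase is the user-supplied secret and the
+// work factor value is illustrative:
+//
+//	r, _ := age.NewScryptRecipient(passphrase)
+//	r.SetWorkFactor(20)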
+func (r *ScryptRecipient) SetWorkFactor(logN int) { + if logN > 30 || logN < 1 { + panic("age: SetWorkFactor called with illegal value") + } + r.workFactor = logN +} + +const scryptSaltSize = 16 + +func (r *ScryptRecipient) Wrap(fileKey []byte) ([]*Stanza, error) { + salt := make([]byte, scryptSaltSize) + if _, err := rand.Read(salt[:]); err != nil { + return nil, err + } + + logN := r.workFactor + l := &Stanza{ + Type: "scrypt", + Args: []string{format.EncodeToString(salt), strconv.Itoa(logN)}, + } + + salt = append([]byte(scryptLabel), salt...) + k, err := scrypt.Key(r.password, salt, 1< 30 || logN < 1 { + panic("age: SetMaxWorkFactor called with illegal value") + } + i.maxWorkFactor = logN +} + +func (i *ScryptIdentity) Unwrap(stanzas []*Stanza) ([]byte, error) { + for _, s := range stanzas { + if s.Type == "scrypt" && len(stanzas) != 1 { + return nil, errors.New("an scrypt recipient must be the only one") + } + } + return multiUnwrap(i.unwrap, stanzas) +} + +var digitsRe = regexp.MustCompile(`^[1-9][0-9]*$`) + +func (i *ScryptIdentity) unwrap(block *Stanza) ([]byte, error) { + if block.Type != "scrypt" { + return nil, ErrIncorrectIdentity + } + if len(block.Args) != 2 { + return nil, errors.New("invalid scrypt recipient block") + } + salt, err := format.DecodeString(block.Args[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse scrypt salt: %v", err) + } + if len(salt) != scryptSaltSize { + return nil, errors.New("invalid scrypt recipient block") + } + if w := block.Args[1]; !digitsRe.MatchString(w) { + return nil, fmt.Errorf("scrypt work factor encoding invalid: %q", w) + } + logN, err := strconv.Atoi(block.Args[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse scrypt work factor: %v", err) + } + if logN > i.maxWorkFactor { + return nil, fmt.Errorf("scrypt work factor too large: %v", logN) + } + if logN <= 0 { // unreachable + return nil, fmt.Errorf("invalid scrypt work factor: %v", logN) + } + + salt = append([]byte(scryptLabel), salt...) + k, err := scrypt.Key(i.password, salt, 1< These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` +* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. +* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. + +### Bugs Fixed + +* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. +* Include error text instead of error type in traces when the transport returns an error. +* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. +* Block key and SAS authentication for non TLS protected endpoints. +* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. +* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. +* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. +* Context values created by `azcore` will no longer flow across disjoint HTTP requests. + +### Other Changes + +* Skip generating trace info for no-op tracers. +* The `clientName` paramater in client constructors has been renamed to `moduleName`. + +## 1.9.0-beta.1 (2023-10-05) + +### Other Changes + +* The beta features for tracing and fakes have been reinstated. + +## 1.8.0 (2023-10-05) + +### Features Added + +* This includes the following features from `v1.8.0-beta.N` releases. 
+ * Claims and CAE for authentication. + * New `messaging` package. + * Various helpers in the `runtime` package. + * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. +* Added types `KeyCredential` and `SASCredential` to the `azcore` package. + * Includes their respective constructor functions. +* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. + * Includes their respective constructor functions and options types. + +### Breaking Changes +> These changes affect only code written against beta versions of `v1.8.0` +* The beta features for tracing and fakes have been omitted for this release. + +### Bugs Fixed + +* Fixed an issue that could cause some ARM RPs to not be automatically registered. +* Block bearer token authentication for non TLS protected endpoints. + +### Other Changes + +* Updated dependencies. + +## 1.8.0-beta.3 (2023-09-07) + +### Features Added + +* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. + +### Bugs Fixed + +* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. + +### Other Changes + +* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. + * `WithCaptureResponse` + * `WithHTTPHeader` + * `WithRetryOptions` + +## 1.7.2 (2023-09-06) + +### Bugs Fixed + +* Fix default HTTP transport to work in WASM modules. + +## 1.8.0-beta.2 (2023-08-14) + +### Features Added + +* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. +* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. + +### Breaking Changes + +> This change affects only code written against beta version `v1.8.0-beta.1`. +* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. + +> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. +* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. + +### Bugs Fixed + +* Propagate any query parameters when constructing a fake poller and/or injecting next links. + +## 1.7.1 (2023-08-14) + +## Bugs Fixed + +* Enable TLS renegotiation in the default transport policy. + +## 1.8.0-beta.1 (2023-07-12) + +### Features Added + +- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) + +### Other Changes + +* The beta features for CAE, tracing, and fakes have been reinstated. + +## 1.7.0 (2023-07-12) + +### Features Added +* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. + +### Breaking Changes +> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 +* The beta features for CAE, tracing, and fakes have been omitted for this release. + +## 1.7.0-beta.2 (2023-06-06) + +### Breaking Changes +> These changes affect only code written against beta version v1.7.0-beta.1 +* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. 
+ * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. +* Method `AddError()` has been removed from type `tracing.Span`. +* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +### Other Changes +* This version contains all bug fixes from `v1.7.0-beta.1` + +## 1.7.0-beta.1 (2023-05-24) + +### Features Added +* Restored CAE support for ARM clients. +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. + * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. + * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. + +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. 
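As a rough sketch of the cross-tenant support noted under 1.4.0-beta.1 above: a caller opts in by setting `AuxiliaryTenants` on the ARM client options. The `armresources` client, the credential type, and the placeholder IDs below are illustrative assumptions, not something this patch prescribes.

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Tokens for each auxiliary tenant are attached to requests in the
	// x-ms-authorization-auxiliary header alongside the primary token.
	opts := &arm.ClientOptions{
		AuxiliaryTenants: []string{"<auxiliary-tenant-id>"},
	}
	client, err := armresources.NewClient("<subscription-id>", cred, opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // requests made with this client now carry auxiliary tokens
}
```

Any ARM client constructor that accepts `*arm.ClientOptions` works the same way.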
+ +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. + +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. 
+* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. + +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. 
+* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. + +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. +* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. 
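To make the 0.19.0 package split concrete, here is a minimal sketch of configuring the in-box logging that now lives in the `log` package, using the package's current `SetListener`/`SetEvents` API rather than the 0.19.0-era names; the listener body is only illustrative.

```
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// Forward SDK log messages to a custom listener.
	azlog.SetListener(func(event azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", event, msg)
	})
	// Only emit HTTP request and response events.
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
}
```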
+ +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. + + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. 
+* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. +* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. 
+* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the duration in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. +* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options.
+ +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 000000000..35a74e18d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +## Contributing +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go new file mode 100644 index 000000000..00f2d5a0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -0,0 +1,224 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package resource + +import ( + "fmt" + "strings" +) + +const ( + providersKey = "providers" + subscriptionsKey = "subscriptions" + resourceGroupsLowerKey = "resourcegroups" + locationsKey = "locations" + builtInResourceNamespace = "Microsoft.Resources" +) + +// RootResourceID defines the tenant as the root parent of all other ResourceID. +var RootResourceID = &ResourceID{ + Parent: nil, + ResourceType: TenantResourceType, + Name: "", +} + +// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. +// Don't create this type directly, use ParseResourceID instead. +type ResourceID struct { + // Parent is the parent ResourceID of this instance. + // Can be nil if there is no parent. + Parent *ResourceID + + // SubscriptionID is the subscription ID in this resource ID. + // The value can be empty if the resource ID does not contain a subscription ID. + SubscriptionID string + + // ResourceGroupName is the resource group name in this resource ID. + // The value can be empty if the resource ID does not contain a resource group name. + ResourceGroupName string + + // Provider represents the provider name in this resource ID. + // This is only valid when the resource ID represents a resource provider. + // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights` + Provider string + + // Location is the location in this resource ID. + // The value can be empty if the resource ID does not contain a location name. + Location string + + // ResourceType represents the type of this resource ID. + ResourceType ResourceType + + // Name is the resource name of this resource ID. 
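+	// For example, "myVnet" in `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/myVnet`.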
+ Name string + + isChild bool + stringValue string +} + +// ParseResourceID parses a string to an instance of ResourceID +func ParseResourceID(id string) (*ResourceID, error) { + if len(id) == 0 { + return nil, fmt.Errorf("invalid resource ID: id cannot be empty") + } + + if !strings.HasPrefix(id, "/") { + return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id) + } + + parts := splitStringAndOmitEmpty(id, "/") + + if len(parts) < 2 { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(RootResourceID, parts, id) +} + +// String returns the string of the ResourceID +func (id *ResourceID) String() string { + if len(id.stringValue) > 0 { + return id.stringValue + } + + if id.Parent == nil { + return "" + } + + builder := strings.Builder{} + builder.WriteString(id.Parent.String()) + + if id.isChild { + builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType())) + if len(id.Name) > 0 { + builder.WriteString(fmt.Sprintf("/%s", id.Name)) + } + } else { + builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)) + } + + id.stringValue = builder.String() + + return id.stringValue +} + +func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) + return id +} + +func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, resourceType, resourceName, true) + return id +} + +func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTypeName, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false) + return id +} + +func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType { + if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) { + return ResourceGroupResourceType + } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() { + return SubscriptionResourceType + } + + return parent.ResourceType.AppendChild(resourceTypeName) +} + +func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) { + if parent != nil { + id.Provider = parent.Provider + id.SubscriptionID = parent.SubscriptionID + id.ResourceGroupName = parent.ResourceGroupName + id.Location = parent.Location + } + + if resourceType.String() == SubscriptionResourceType.String() { + id.SubscriptionID = name + } + + if resourceType.lastType() == locationsKey { + id.Location = name + } + + if resourceType.String() == ResourceGroupResourceType.String() { + id.ResourceGroupName = name + } + + if resourceType.String() == ProviderResourceType.String() { + id.Provider = name + } + + if parent == nil { + id.Parent = RootResourceID + } else { + id.Parent = parent + } + id.isChild = isChild + id.ResourceType = resourceType + id.Name = name +} + +func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) { + if len(parts) == 0 { + return parent, nil + } + + if len(parts) == 1 { + // subscriptions and resourceGroups are not valid ids without 
their names + if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + // resourceGroup must contain either child or provider resource type + if parent.ResourceType.String() == ResourceGroupResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return newResourceID(parent, parts[0], ""), nil + } + + if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { + // provider resource can only be on a tenant or a subscription parent + if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id) + } + + if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id) + } + + if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id) + } + + return nil, fmt.Errorf("invalid resource ID: %s", id) +} + +func splitStringAndOmitEmpty(v, sep string) []string { + r := make([]string, 0) + for _, s := range strings.Split(v, sep) { + if len(s) == 0 { + continue + } + r = append(r, s) + } + + return r +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go new file mode 100644 index 000000000..ca03ac971 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package resource + +import ( + "fmt" + "strings" +) + +// SubscriptionResourceType is the ResourceType of a subscription +var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions") + +// ResourceGroupResourceType is the ResourceType of a resource group +var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups") + +// TenantResourceType is the ResourceType of a tenant +var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants") + +// ProviderResourceType is the ResourceType of a provider +var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers") + +// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets". +// Don't create this type directly, use ParseResourceType or NewResourceType instead. +type ResourceType struct { + // Namespace is the namespace of the resource type. + // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets" + Namespace string + + // Type is the full type name of the resource type. + // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets" + Type string + + // Types is the slice of all the sub-types of this resource type. + // e.g. 
["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets" + Types []string + + stringValue string +} + +// String returns the string of the ResourceType +func (t ResourceType) String() string { + return t.stringValue +} + +// IsParentOf returns true when the receiver is the parent resource type of the child. +func (t ResourceType) IsParentOf(child ResourceType) bool { + if !strings.EqualFold(t.Namespace, child.Namespace) { + return false + } + if len(t.Types) >= len(child.Types) { + return false + } + for i := range t.Types { + if !strings.EqualFold(t.Types[i], child.Types[i]) { + return false + } + } + + return true +} + +// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it. +func (t ResourceType) AppendChild(childType string) ResourceType { + return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType)) +} + +// NewResourceType creates an instance of ResourceType using a provider namespace +// such as "Microsoft.Network" and type such as "virtualNetworks/subnets". +func NewResourceType(providerNamespace, typeName string) ResourceType { + return ResourceType{ + Namespace: providerNamespace, + Type: typeName, + Types: splitStringAndOmitEmpty(typeName, "/"), + stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName), + } +} + +// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subsets) +// or a resource identifier string. +// e.g. /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet) +func ParseResourceType(resourceIDOrType string) (ResourceType, error) { + // split the path into segments + parts := splitStringAndOmitEmpty(resourceIDOrType, "/") + + // There must be at least a namespace and type name + if len(parts) < 1 { + return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType) + } + + // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace + if len(parts) == 1 { + // Simple resource type + return NewResourceType(builtInResourceNamespace, parts[0]), nil + } else if strings.Contains(parts[0], ".") { + // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets) + // it is a full type name + return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil + } else { + // Check if ResourceID + id, err := ParseResourceID(resourceIDOrType) + if err != nil { + return ResourceType{}, err + } + return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil + } +} + +func (t ResourceType) lastType() string { + return t.Types[len(t.Types)-1] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go new file mode 100644 index 000000000..f18caf848 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -0,0 +1,108 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests. 
+	// The policy will add a token from each of these tenants to every request. The +	// authenticating user or service principal must be a guest in these tenants, and the +	// policy's credential must support multitenant authentication. +	AuxiliaryTenants []string + +	// InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. +	// By default, authenticated requests to an HTTP endpoint are rejected by the client. +	// WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. +	InsecureAllowCredentialWithHTTP bool + +	// Scopes contains the list of permission scopes required for the token. +	Scopes []string +} + +// RegistrationOptions configures the registration policy's behavior. +// All zero-value fields will be initialized with their default values. +type RegistrationOptions struct { +	policy.ClientOptions + +	// MaxAttempts is the total number of times to attempt automatic registration +	// in the event that an attempt fails. +	// The default value is 3. +	// Set to a value less than zero to disable the policy. +	MaxAttempts int + +	// PollingDelay is the amount of time to sleep between polling intervals. +	// The default value is 15 seconds. +	// A value less than zero means no delay between polling intervals (not recommended). +	PollingDelay time.Duration + +	// PollingDuration is the amount of time to wait before abandoning polling. +	// The default value is 5 minutes. +	// NOTE: Setting this to a small value might cause the policy to prematurely fail. +	PollingDuration time.Duration + +	// StatusCodes contains the slice of custom HTTP status codes to use instead +	// of the default http.StatusConflict. This should only be set if a service +	// returns a non-standard HTTP status code when unregistered. +	StatusCodes []int +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions struct { +	policy.ClientOptions + +	// AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests. +	// The client will add a token from each of these tenants to every request. The +	// authenticating user or service principal must be a guest in these tenants, and the +	// client's credential must support multitenant authentication. +	AuxiliaryTenants []string + +	// DisableRPRegistration disables the auto-RP registration policy. Defaults to false. +	DisableRPRegistration bool +} + +// Clone returns a deep copy of the current options.
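+// The slice and map fields (cloud service configuration, logging allow-lists, retry status codes, and the per-call/per-retry policy slices) are copied into new containers; the values they hold are shared with the original.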
+func (o *ClientOptions) Clone() *ClientOptions { +	if o == nil { +		return nil +	} +	copiedOptions := *o +	copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services) +	copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders) +	copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams) +	copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes) +	copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies) +	copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies) +	return &copiedOptions +} + +// copyMap returns a new map with all the key/value pairs in the src map +func copyMap[K comparable, V any](src map[K]V) map[K]V { +	if src == nil { +		return nil +	} +	copiedMap := make(map[K]V) +	for k, v := range src { +		copiedMap[k] = v +	} +	return copiedMap +} + +// copyArray returns a new array with all the elements in the src array +func copyArray[T any](src []T) []T { +	if src == nil { +		return nil +	} +	copiedArray := make([]T, len(src)) +	copy(copiedArray, src) +	return copiedArray +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go new file mode 100644 index 000000000..6a7c916b4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( +	"errors" +	"reflect" + +	"github.com/Azure/azure-sdk-for-go/sdk/azcore" +	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" +	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" +	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +	azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +	azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are +// placed after policies from PipelineOptions. The telemetry policy, when enabled, will +// use the specified module and version info.
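+// It appends the ARM bearer token policy (plus an HTTP trace namespace policy) to the per-retry policies and, unless options.DisableRPRegistration is set, an RP auto-registration policy to the per-call policies.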
+func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) { + if options == nil { + options = &armpolicy.ClientOptions{} + } + conf, err := getConfiguration(&options.ClientOptions) + if err != nil { + return azruntime.Pipeline{}, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ + AuxiliaryTenants: options.AuxiliaryTenants, + InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP, + Scopes: []string{conf.Audience + "/.default"}, + }) + // we don't want to modify the underlying array in plOpts.PerRetry + perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) + copy(perRetry, plOpts.PerRetry) + perRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + plOpts.PerRetry = perRetry + if !options.DisableRPRegistration { + regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} + regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) + if err != nil { + return azruntime.Pipeline{}, err + } + // we don't want to modify the underlying array in plOpts.PerCall + perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) + copy(perCall, plOpts.PerCall) + perCall = append(perCall, regPolicy) + plOpts.PerCall = perCall + } + if plOpts.APIVersion.Name == "" { + plOpts.APIVersion.Name = "api-version" + } + return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil +} + +func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) { + c := cloud.AzurePublic + if !reflect.ValueOf(o.Cloud).IsZero() { + c = o.Cloud + } + if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" { + return conf, nil + } else { + return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go new file mode 100644 index 000000000..765fbc684 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + +// acquiringResourceState holds data for an auxiliary token request +type acquiringResourceState struct { + ctx context.Context + p *BearerTokenPolicy + tenant string +} + +// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function. 
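+// It requests a CAE-enabled token for the tenant carried in state, scoped to the policy's scopes, and returns the token's expiry so the temporal.Resource cache knows when to refresh it.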
+func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{ + EnableCAE: true, + Scopes: state.p.scopes, + TenantID: state.tenant, + }) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState] + btp *azruntime.BearerTokenPolicy + cred azcore.TokenCredential + scopes []string +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &armpolicy.BearerTokenOptions{} + } + p := &BearerTokenPolicy{cred: cred} + p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants)) + for _, t := range opts.AuxiliaryTenants { + p.auxResources[t] = temporal.NewResource(acquireAuxToken) + } + p.scopes = make([]string, len(opts.Scopes)) + copy(p.scopes, opts.Scopes) + p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP, + AuthorizationHandler: azpolicy.AuthorizationHandler{ + OnChallenge: p.onChallenge, + OnRequest: p.onRequest, + }, + }) + return p +} + +func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { + challenge := res.Header.Get(shared.HeaderWWWAuthenticate) + claims, err := parseChallenge(challenge) + if err != nil { + // the challenge contains claims we can't parse + return err + } else if claims != "" { + // request a new token having the specified claims, send the request again + return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) + } + // auth challenge didn't include claims, so this is a simple authorization failure + return azruntime.NewResponseError(res) +} + +// onRequest authorizes requests with one or more bearer tokens +func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { + // authorize the request with a token for the primary tenant + err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + if err != nil || len(b.auxResources) == 0 { + return err + } + // add tokens for auxiliary tenants + as := acquiringResourceState{ + ctx: req.Raw().Context(), + p: b, + } + auxTokens := make([]string, 0, len(b.auxResources)) + for tenant, er := range b.auxResources { + as.tenant = tenant + auxTk, err := er.Get(as) + if err != nil { + return err + } + auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token)) + } + req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", ")) + return nil +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + return b.btp.Do(req) +} + +// parseChallenge parses claims from an 
authentication challenge issued by ARM so a client can request a token +// that will satisfy conditional access policies. It returns a non-nil error when the given value contains +// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. +func parseChallenge(wwwAuthenticate string) (string, error) { + claims := "" + var err error + for _, param := range strings.Split(wwwAuthenticate, ",") { + if _, after, found := strings.Cut(param, "claims="); found { + if claims != "" { + // The header contains multiple challenges, at least two of which specify claims. The specs allow this + // but it's unclear what a client should do in this case and there's as yet no concrete example of it. + err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) + break + } + // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded + claims = strings.Trim(after, `\"=`) + // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" + if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { + claims = string(b) + } else { + err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) + break + } + } + } + return claims, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go new file mode 100644 index 000000000..810ac9d9f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -0,0 +1,322 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const ( + // LogRPRegistration entries contain information specific to the automatic registration of an RP. + // Entries of this classification are written IFF the policy needs to take any action. + LogRPRegistration log.Event = "RPRegistration" +) + +// init sets any default values +func setDefaults(r *armpolicy.RegistrationOptions) { + if r.MaxAttempts == 0 { + r.MaxAttempts = 3 + } else if r.MaxAttempts < 0 { + r.MaxAttempts = 0 + } + if r.PollingDelay == 0 { + r.PollingDelay = 15 * time.Second + } else if r.PollingDelay < 0 { + r.PollingDelay = 0 + } + if r.PollingDuration == 0 { + r.PollingDuration = 5 * time.Minute + } + if len(r.StatusCodes) == 0 { + r.StatusCodes = []int{http.StatusConflict} + } +} + +// NewRPRegistrationPolicy creates a policy object configured using the specified options. +// The policy controls whether an unregistered resource provider should automatically be +// registered. See https://aka.ms/rps-not-found for more information. 
+func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) { + if o == nil { + o = &armpolicy.RegistrationOptions{} + } + conf, err := getConfiguration(&o.ClientOptions) + if err != nil { + return nil, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}}) + p := &rpRegistrationPolicy{ + endpoint: conf.Endpoint, + pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions), + options: *o, + } + // init the copy + setDefaults(&p.options) + return p, nil +} + +type rpRegistrationPolicy struct { + endpoint string + pipeline runtime.Pipeline + options armpolicy.RegistrationOptions +} + +func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + if r.options.MaxAttempts == 0 { + // policy is disabled + return req.Next() + } + const registeredState = "Registered" + var rp string + var resp *http.Response + for attempts := 0; attempts < r.options.MaxAttempts; attempts++ { + var err error + // make the original request + resp, err = req.Next() + // getting a 409 is the first indication that the RP might need to be registered, check error response + if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) { + return resp, err + } + var reqErr requestError + if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil { + return resp, err + } + if reqErr.ServiceError == nil { + // missing service error info. just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + if !isUnregisteredRPCode(reqErr.ServiceError.Code) { + // not a 409 due to unregistered RP. 
just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + res, err := resource.ParseResourceID(req.Raw().URL.Path) + if err != nil { + return resp, err + } + rp = res.ResourceType.Namespace + logRegistrationExit := func(v any) { + log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) + } + log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) + // create client and make the registration request + // we use the scheme and host from the original request + rpOps := &providersOperations{ + p: r.pipeline, + u: r.endpoint, + subID: res.SubscriptionID, + } + if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { + logRegistrationExit(err) + return resp, err + } + + // RP was registered, however we need to wait for the registration to complete + pollCtx, pollCancel := context.WithTimeout(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, r.options.PollingDuration) + var lastRegState string + for { + // get the current registration state + getResp, err := rpOps.Get(pollCtx, rp) + if err != nil { + pollCancel() + logRegistrationExit(err) + return resp, err + } + if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) { + // registration state has changed, or was updated for the first time + lastRegState = *getResp.Provider.RegistrationState + log.Writef(LogRPRegistration, "registration state is %s", lastRegState) + } + if strings.EqualFold(lastRegState, registeredState) { + // registration complete + pollCancel() + logRegistrationExit(lastRegState) + break + } + // wait before trying again + select { + case <-time.After(r.options.PollingDelay): + // continue polling + case <-pollCtx.Done(): + pollCancel() + logRegistrationExit(pollCtx.Err()) + return resp, pollCtx.Err() + } + } + // RP was successfully registered, retry the original request + err = req.RewindBody() + if err != nil { + return resp, err + } + } + // if we get here it means we exceeded the number of attempts + return resp, fmt.Errorf("exceeded attempts to register %s", rp) +} + +var unregisteredRPCodes = []string{ + "MissingSubscriptionRegistration", + "MissingRegistrationForResourceProvider", + "Subscription Not Registered", + "SubscriptionNotRegistered", +} + +func isUnregisteredRPCode(errorCode string) bool { + for _, code := range unregisteredRPCodes { + if strings.EqualFold(errorCode, code) { + return true + } + } + return false +} + +// minimal error definitions to simplify detection +type requestError struct { + ServiceError *serviceError `json:"error"` +} + +type serviceError struct { + Code string `json:"code"` +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +// the following code was copied from module armresources, providers.go and models.go +// only the minimum amount of code was copied to get this working and some edits were made. +/////////////////////////////////////////////////////////////////////////////////////////////// + +type providersOperations struct { + p runtime.Pipeline + u string + subID string +} + +// Get - Gets the specified resource provider. 
+func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.getCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.getHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// getCreateRequest creates the Get request. +func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// Register - Registers a subscription with a resource provider. +func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.registerCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.registerHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// registerCreateRequest creates the Register request. +func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// registerHandleResponse handles the Register response. +func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// ProviderResponse is the response envelope for operations that return a Provider type. 
+type providerResponse struct { + // Resource provider information. + Provider *provider + + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// Provider - Resource provider information. +type provider struct { + // The provider ID. + ID *string `json:"id,omitempty"` + + // The namespace of the resource provider. + Namespace *string `json:"namespace,omitempty"` + + // The registration policy of the resource provider. + RegistrationPolicy *string `json:"registrationPolicy,omitempty"` + + // The registration state of the resource provider. + RegistrationState *string `json:"registrationState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go new file mode 100644 index 000000000..6cea18424 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span +func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + rt, err := resource.ParseResourceType(req.Raw().URL.Path) + if err == nil { + // add the namespace attribute to the current span + span := tracer.SpanFromContext(req.Raw().Context()) + span.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: rt.Namespace}) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go new file mode 100644 index 000000000..1400d4379 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + +func init() { + cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.chinacloudapi.cn", + Endpoint: "https://management.chinacloudapi.cn", + } + cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.usgovcloudapi.net", + Endpoint: "https://management.usgovcloudapi.net", + } + cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.windows.net/", + Endpoint: "https://management.azure.com", + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 000000000..99348527b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 000000000..9d077a3e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. + ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. 
+ Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 000000000..985b1bde2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 000000000..9d1c2f0c0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,173 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +type KeyCredential = exported.KeyCredential + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. 
+// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return exported.NewKeyCredential(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +type SASCredential = exported.SASCredential + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return exported.NewSASCredential(sas) +} + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]any = map[reflect.Type]any{} +var nullablesMu sync.RWMutex + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. +func NullValue[T any]() T { + t := shared.TypeOfT[T]() + + nullablesMu.RLock() + v, found := nullables[t] + nullablesMu.RUnlock() + + if found { + // return the sentinel object + return v.(T) + } + + // promote to exclusive lock and check again (double-checked locking pattern) + nullablesMu.Lock() + defer nullablesMu.Unlock() + v, found = nullables[t] + + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + nullablesMu.RLock() + defer nullablesMu.RUnlock() + + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer + + // cached on the client to support shallow copying with new values + tp tracing.Provider + modVer string + namespace string +} + +// NewClient creates a new Client instance with the provided values. +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. 
+// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + if tr.Enabled() && plOpts.Tracing.Namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) + } + + return &Client{ + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + namespace: plOpts.Tracing.Namespace, + }, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} + +// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. +// Note that the values for module name and version will be preserved from the source Client. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +func (c *Client) WithClientName(clientName string) *Client { + tr := c.tp.NewTracer(clientName, c.modVer) + if tr.Enabled() && c.namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) + } + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 000000000..654a5f404 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,264 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. 
The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err = req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Response, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods.
You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. + +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. 
+ + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. + + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. + +# Fakes + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 000000000..17bd50c67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. 
+type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 000000000..2b19d01f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,57 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} + +// MatchConditions specifies HTTP options for conditional requests. +type MatchConditions struct { + // Optionally limit requests to resources that have a matching ETag. + IfMatch *ETag + + // Optionally limit requests to resources that do not match the ETag. + IfNoneMatch *ETag +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 000000000..f2b296b6d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,175 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "sync/atomic" + "time" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. 
+type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string + + // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, + // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for + // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up + // in a loop retrying an API call with a token that has been revoked due to CAE. + EnableCAE bool + + // Scopes contains the list of permission scopes required for the token. + Scopes []string + + // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in + // their configured default tenants when this field isn't set. + TenantID string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} + +// DecodeByteArray will base-64 decode the provided string into v. +// Exported as runtime.DecodeByteArray() +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +// Exported as azcore.KeyCredential. +type KeyCredential struct { + cred *keyCredential +} + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return &KeyCredential{cred: newKeyCredential(key)} +} + +// Update replaces the existing key with the specified value. +func (k *KeyCredential) Update(key string) { + k.cred.Update(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +// Exported as azcore.SASCredential. +type SASCredential struct { + cred *keyCredential +} + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return &SASCredential{cred: newKeyCredential(sas)} +} + +// Update replaces the existing shared access signature with the specified value. 
+func (k *SASCredential) Update(sas string) { + k.cred.Update(sas) +} + +// KeyCredentialGet returns the key for cred. +func KeyCredentialGet(cred *KeyCredential) string { + return cred.cred.Get() +} + +// SASCredentialGet returns the shared access sig for cred. +func SASCredentialGet(cred *SASCredential) string { + return cred.cred.Get() +} + +type keyCredential struct { + key atomic.Value // string +} + +func newKeyCredential(key string) *keyCredential { + keyCred := keyCredential{} + keyCred.key.Store(key) + return &keyCred +} + +func (k *keyCredential) Get() string { + return k.key.Load().(string) +} + +func (k *keyCredential) Update(key string) { + k.key.Store(key) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 000000000..e45f831ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "net/http" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. + Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. 
+func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 000000000..e3e2d4e58 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,260 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +// Exported as runtime.Base64Encoding +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// EncodeByteArray will base-64 encode the byte slice v. +// Exported as runtime.EncodeByteArray() +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. +type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]any + +// Set adds/changes a value +func (ov opValues) set(value any) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value any) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. + readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). 
+func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. +func (req *Request) SetOperationValue(value any) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value any) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set, and if it was set, will be deleted. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + // clobber the existing Content-Type to preserve behavior + return SetBody(req, body, contentType, true) +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} + +// WithContext returns a shallow copy of the request with its context changed to ctx. +func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + +// not exported but dependent on Request + +// PolicyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type PolicyFunc func(*Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. 
+func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { + return pf(req) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. +// - req is the request to modify +// - body is the request body; if nil or empty, Content-Length won't be set +// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted +// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType +func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 000000000..08a954587 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,167 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + // prefer the error code in the response header + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { + return NewResponseErrorWithErrorCode(resp, ec) + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := exported.Payload(resp, nil) + if err != nil { + // since we're not returning the ResponseError in this + // case we also don't want to write it to the log. 
+ return err + } + + var errorCode string + if len(body) > 0 { + if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { + errorCode = fromJSON + } else if fromXML := extractErrorCodeXML(body); fromXML != "" { + errorCode = fromXML + } + } + + return NewResponseErrorWithErrorCode(resp, errorCode) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Exported as runtime.NewResponseErrorWithErrorCode(). +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + respErr := &ResponseError{ + ErrorCode: errorCode, + StatusCode: resp.StatusCode, + RawResponse: resp, + } + log.Write(log.EventResponseError, respErr.Error()) + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]any + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. + ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. 
+func (e *ResponseError) Error() string { + const separator = "--------------------------------------------------------------------------------" + // write the request method and URL with response status code + msg := &bytes.Buffer{} + if e.RawResponse != nil { + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, separator) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + } else { + fmt.Fprintln(msg, "Missing RawResponse") + fmt.Fprintln(msg, separator) + } + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + if e.RawResponse != nil { + fmt.Fprintln(msg, separator) + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + } + fmt.Fprintln(msg, separator) + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 000000000..6fc6d1400 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventResponseError = azlog.EventResponseError + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls log.Event, format string, a ...any) { + log.Writef(cls, format, a...) +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. 
+func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 000000000..a53462760 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
+		if !poller.StatusCodeValid(resp) {
+			p.resp = resp
+			return "", exported.NewResponseError(resp)
+		}
+		state, err := poller.GetStatus(resp)
+		if err != nil {
+			return "", err
+		} else if state == "" {
+			return "", errors.New("the response did not contain a status")
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	if p.resp.StatusCode == http.StatusNoContent {
+		return nil
+	} else if poller.Failed(p.CurState) {
+		return exported.NewResponseError(p.resp)
+	}
+	var req *exported.Request
+	var err error
+	if p.Method == http.MethodPatch || p.Method == http.MethodPut {
+		// for PATCH and PUT, the final GET is on the original resource URL
+		req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+	} else if p.Method == http.MethodPost {
+		if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
+			// no final GET required
+		} else if p.FinalState == pollers.FinalStateViaOriginalURI {
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+		} else if p.LocURL != "" {
+			// ideally FinalState would be set to "location" but it isn't always.
+			// must check last due to more permissive condition.
+			req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// if a final GET request has been created, execute it
+	if req != nil {
+		resp, err := p.pl.Do(req)
+		if err != nil {
+			return err
+		}
+		p.resp = resp
+	}
+
+	return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
new file mode 100644
index 000000000..8751b0514
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -0,0 +1,135 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package body
+
+import (
+	"context"
+	"errors"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
+)
+
+// Kind is the identifier of this type in a resume token.
+const kind = "body"
+
+// Applicable returns true if the LRO is using no headers, just provisioning state.
+// This is only applicable to PATCH and PUT methods and assumes no polling headers.
+func Applicable(resp *http.Response) bool {
+	// we can't check for absence of headers due to some misbehaving services
+	// like redis that return a Location header but don't actually use that protocol
+	return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
+}
+
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]any) bool {
+	t, ok := token["type"]
+	if !ok {
+		return false
+	}
+	tt, ok := t.(string)
+	if !ok {
+		return false
+	}
+	return tt == kind
+}
+
+// Poller is an LRO poller that uses the Body pattern.
+type Poller[T any] struct {
+	pl exported.Pipeline
+
+	resp *http.Response
+
+	// The poller's type, used for resume token processing.
+	Type string `json:"type"`
+
+	// The URL for polling.
+	PollURL string `json:"pollURL"`
+
+	// The LRO's current state.
+	CurState string `json:"state"`
+}
+
+// New creates a new Poller from the provided initial response.
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
+	if resp == nil {
+		log.Write(log.EventLRO, "Resuming Body poller.")
+		return &Poller[T]{pl: pl}, nil
+	}
+	log.Write(log.EventLRO, "Using Body poller.")
+	p := &Poller[T]{
+		pl:      pl,
+		resp:    resp,
+		Type:    kind,
+		PollURL: resp.Request.URL.String(),
+	}
+	// default initial state to InProgress. depending on the HTTP
+	// status code and provisioning state, we might change the value.
+	curState := poller.StatusInProgress
+	provState, err := poller.GetProvisioningState(resp)
+	if err != nil && !errors.Is(err, poller.ErrNoBody) {
+		return nil, err
+	}
+	if resp.StatusCode == http.StatusCreated && provState != "" {
+		// absence of provisioning state is ok for a 201, means the operation is in progress
+		curState = provState
+	} else if resp.StatusCode == http.StatusOK {
+		if provState != "" {
+			curState = provState
+		} else if provState == "" {
+			// for a 200, absence of provisioning state indicates success
+			curState = poller.StatusSucceeded
+		}
+	} else if resp.StatusCode == http.StatusNoContent {
+		curState = poller.StatusSucceeded
+	}
+	p.CurState = curState
+	return p, nil
+}
+
+func (p *Poller[T]) Done() bool {
+	return poller.IsTerminalState(p.CurState)
+}
+
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+	err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
+		if !poller.StatusCodeValid(resp) {
+			p.resp = resp
+			return "", exported.NewResponseError(resp)
+		}
+		if resp.StatusCode == http.StatusNoContent {
+			p.resp = resp
+			p.CurState = poller.StatusSucceeded
+			return p.CurState, nil
+		}
+		state, err := poller.GetProvisioningState(resp)
+		if errors.Is(err, poller.ErrNoBody) {
+			// a missing response body in non-204 case is an error
+			return "", err
+		} else if state == "" {
+			// a response body without provisioning state is considered terminal success
+			state = poller.StatusSucceeded
+		} else if err != nil {
+			return "", err
+		}
+		p.resp = resp
+		p.CurState = state
+		return p.CurState, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+	return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
new file mode 100644
index 000000000..7f8d11b8b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
@@ -0,0 +1,133 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// lroStatusURLSuffix is the URL path suffix for a faked LRO. +const lroStatusURLSuffix = "/get/fake/status" + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + qp := "" + if resp.Request.URL.RawQuery != "" { + qp = "?" + resp.Request.URL.RawQuery + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() + FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out) +} + +// SanitizePollerPath removes any fake-appended suffix from a URL's path. +func SanitizePollerPath(path string) string { + return strings.TrimSuffix(path, lroStatusURLSuffix) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 000000000..048285275 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. 
+ state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. + provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 000000000..03699fd76 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. 
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. + curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + + // when the payload is included with the status monitor on + // terminal success it's in the "result" JSON property + payloadPath := "result" + + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + // no JSON path when making a final GET request + payloadPath = "" + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 000000000..37ed647f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 000000000..6a7a32e03 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,212 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. 
+func IsTokenValid[T any](token string) error { + raw := map[string]any{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. +func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !poller.StatusCodeValid(resp) || failed { + // the LRO failed. unmarshall the error and update state + return azexported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp, nil) + if err != nil { + return err + } + + if jsonPath != "" && len(payload) > 0 { + // extract the payload from the specified JSON path. + // do this before the zero-length check in case there + // is no payload. + jsonBody := map[string]json.RawMessage{} + if err = json.Unmarshal(payload, &jsonBody); err != nil { + return err + } + payload = jsonBody[jsonPath] + } + + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. 
+func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 000000000..7cb8c207e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" + ContentTypeTextPlain = "text/plain" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" + HeaderUserAgent = "User-Agent" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" +) + +const BearerTokenPrefix = "Bearer " + +const TracingNamespaceAttrName = "az.namespace" + +const ( + // Module is the name of the calling module used in telemetry data. + Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. + Version = "v1.14.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 000000000..d3da2c5fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,149 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "time" +) + +// NOTE: when adding a new context key type, it likely needs to be +// added to the deny-list of key types in ContextWithDeniedValues + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxWithCaptureResponse is used as a context key for retrieving the raw response. +type CtxWithCaptureResponse struct{} + +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. 
+func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). + custom func(string) time.Duration + } + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, + } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } + } + + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. +// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's +// context with an instance of this type. This is to prevent context values from flowing across disjoint +// requests which can have unintended side-effects. +type ContextWithDeniedValues struct { + context.Context +} + +// Value implements part of the [context.Context] interface. +// It acts as a deny-list for certain context keys. 
+func (c *ContextWithDeniedValues) Value(key any) any {
+	switch key.(type) {
+	case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer:
+		return nil
+	default:
+		return c.Context.Value(key)
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
new file mode 100644
index 000000000..2f3901bff
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package log contains functionality for configuring logging behavior.
+// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
+package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
new file mode 100644
index 000000000..f260dac36
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
@@ -0,0 +1,55 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Package log provides functionality for configuring logging facilities.
+package log
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// Event is used to group entries. Each group can be toggled on or off.
+type Event = log.Event
+
+const (
+	// EventRequest entries contain information about HTTP requests.
+	// This includes information like the URL, query parameters, and headers.
+	EventRequest Event = "Request"
+
+	// EventResponse entries contain information about HTTP responses.
+	// This includes information like the HTTP status code, headers, and request URL.
+	EventResponse Event = "Response"
+
+	// EventResponseError entries contain information about HTTP responses that returned
+	// an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code).
+	// This includes the contents of ResponseError.Error().
+	EventResponseError Event = "ResponseError"
+
+	// EventRetryPolicy entries contain information specific to the retry policy in use.
+	EventRetryPolicy Event = "Retry"
+
+	// EventLRO entries contain information specific to long-running operations.
+	// This includes information like polling location, operation state, and sleep intervals.
+	EventLRO Event = "LongRunningOperation"
+)
+
+// SetEvents is used to control which events are written to
+// the log. By default all log events are written.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetEvents(cls ...Event) {
+	log.SetEvents(cls...)
+}
+
+// SetListener will set the Logger to write to the specified Listener.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 000000000..fad2579ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 000000000..8d9845358 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,197 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "context" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the credential in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. 
+ PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 60 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int + + // ShouldRetry evaluates if the retry policy should retry the request. + // When specified, the function overrides comparison against the list of + // HTTP status codes and error checking within the retry policy. Context + // and NonRetriable errors remain evaluated before calling ShouldRetry. + // The *http.Response and error parameters are mutually exclusive, i.e. + // if one is nil, the other is not nil. + // A return value of true means the retry policy should retry. + ShouldRetry func(*http.Response, error) bool +} + +// TelemetryOptions configures the telemetry policy's behavior. 
+type TelemetryOptions struct {
+	// ApplicationID is an application-specific identification string to add to the User-Agent.
+	// It has a maximum length of 24 characters and must not contain any spaces.
+	ApplicationID string
+
+	// Disabled will prevent the addition of any telemetry data to the User-Agent.
+	Disabled bool
+}
+
+// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
+type TokenRequestOptions = exported.TokenRequestOptions
+
+// BearerTokenOptions configures the bearer token policy's behavior.
+type BearerTokenOptions struct {
+	// AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request.
+	// When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
+	// its given credential.
+	AuthorizationHandler AuthorizationHandler
+
+	// InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
+	// By default, authenticated requests to an HTTP endpoint are rejected by the client.
+	// WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution.
+	InsecureAllowCredentialWithHTTP bool
+}
+
+// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
+type AuthorizationHandler struct {
+	// OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
+	// from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
+	// available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
+	// send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
+	// token from its credential according to its configuration.
+	OnRequest func(*Request, func(TokenRequestOptions) error) error
+
+	// OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
+	// request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
+	// for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
+	// given credential. Implementations that need to perform I/O should use the Request's context, available from
+	// Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
+	// the policy will return any 401 response to the client.
+	OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
+}
+
+// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
+// The resp parameter will contain the HTTP response after the request has completed.
+func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
+	return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp)
+}
+
+// WithHTTPHeader adds the specified http.Header to the parent context.
+// Use this to specify custom HTTP headers at the API-call level.
+// Any overlapping headers will have their values replaced with the values specified here.
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 000000000..c9cfa438c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 000000000..c0d56158e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,27 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +// The error code will be extracted from the *http.Response, either from the x-ms-error-code +// header (preferred) or attempted to be parsed from the response body. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Use this variant when the error code is in a non-standard location. +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + return exported.NewResponseErrorWithErrorCode(resp, errorCode) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 000000000..b960cff0b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,137 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. 
+ Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + tracer tracing.Tracer + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + tracer: handler.Tracer, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + } else { + // non-LRO case, first page + p.firstPage = false + } + + var err error + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err := p.handler.Fetcher(ctx, p.current) + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} + +// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. +type FetcherForNextLinkOptions struct { + // NextReq is the func to be called when requesting subsequent pages. + // Used for paged operations that have a custom next link operation. + NextReq func(context.Context, string) (*policy.Request, error) + + // StatusCodes contains additional HTTP status codes indicating success. + // The default value is http.StatusOK. + StatusCodes []int +} + +// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. +// - ctx is the [context.Context] controlling the lifetime of the HTTP operation +// - pl is the [Pipeline] used to dispatch the HTTP request +// - nextLink is the URL used to fetch the next page. 
the empty string indicates the first page is to be requested +// - firstReq is the func to be called when creating the request for the first page +// - options contains any optional parameters, pass nil to accept the default values +func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { + var req *policy.Request + var err error + if options == nil { + options = &FetcherForNextLinkOptions{} + } + if nextLink == "" { + req, err = firstReq(ctx) + } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { + if options.NextReq != nil { + req, err = options.NextReq(ctx, nextLink) + } else { + req, err = NewRequest(ctx, http.MethodGet, nextLink) + } + } + if err != nil { + return nil, err + } + resp, err := pl.Do(req) + if err != nil { + return nil, err + } + successCodes := []int{http.StatusOK} + successCodes = append(successCodes, options.StatusCodes...) + if !HasStatusCode(resp, successCodes...) { + return nil, NewResponseError(resp) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 000000000..6b1f5c083 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // Tracing contains options used to configure distributed tracing. + Tracing TracingOptions +} + +// TracingOptions contains tracing options for SDK developers. +type TracingOptions struct { + // Namespace contains the value to use for the az.namespace span attribute. + Namespace string +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. 
+func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 000000000..e5309aa6c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. 
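For context, here is a minimal sketch (not part of the vendored patch) of how an SDK-style client might assemble its pipeline with runtime.NewPipeline and opt in to the api-version override described above. The widgets package, client type, module name, and endpoint layout are hypothetical.

```go
package widgets

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// Client is a hypothetical service client built on the azcore pipeline.
type Client struct {
	endpoint string
	pl       runtime.Pipeline
}

// NewClient builds the pipeline once; per-call behavior is driven by context values.
func NewClient(endpoint string, options *policy.ClientOptions) *Client {
	if options == nil {
		options = &policy.ClientOptions{}
	}
	plOpts := runtime.PipelineOptions{
		// lets callers override the service version via ClientOptions.APIVersion
		APIVersion: runtime.APIVersionOptions{
			Location: runtime.APIVersionLocationQueryParam,
			Name:     "api-version",
		},
	}
	// the module name and version feed the telemetry (User-Agent) policy
	pl := runtime.NewPipeline("widgets", "v0.1.0", plOpts, options)
	return &Client{endpoint: endpoint, pl: pl}
}

// Get issues a GET through the full policy chain.
func (c *Client) Get(ctx context.Context, id string) (*http.Response, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(c.endpoint, "widgets", id))
	if err != nil {
		return nil, err
	}
	return c.pl.Do(req)
}
```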
+func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. +func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 000000000..cb2a69528 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string + allowHTTP bool +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. 
+// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + return &BearerTokenPolicy{ + authzHandler: opts.AuthorizationHandler, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + allowHTTP: opts.InsecureAllowCredentialWithHTTP, + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no TokenCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if b.cred == nil { + return req.Next() + } + + if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil { + return nil, err + } + + var err error + if b.authzHandler.OnRequest != nil { + err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + } else { + err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) + } + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + res, err = req.Next() + } + } + } + if err != nil { + err = errorinfo.NonRetriableError(err) + } + return res, err +} + +func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP { + return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 000000000..99dc029f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
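For context, a short sketch (not part of the vendored patch) of the conventional way NewBearerTokenPolicy is wired into a pipeline. The example module name is hypothetical; azidentity and the ARM scope are real, commonly used values.

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newAuthorizedPipeline builds a pipeline whose requests carry bearer tokens
// acquired from a DefaultAzureCredential.
func newAuthorizedPipeline(options *policy.ClientOptions) (runtime.Pipeline, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return runtime.Pipeline{}, err
	}
	// auth policies are conventionally placed in PerRetry so they run on every attempt
	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, nil)
	plOpts := runtime.PipelineOptions{
		PerRetry: []policy.Policy{authPolicy},
	}
	return runtime.NewPipeline("example", "v0.1.0", plOpts, options), nil
}
```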
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 000000000..c230af0af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +// Deprecated: use [policy.WithHTTPHeader] instead. 
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return policy.WithHTTPHeader(parent, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go new file mode 100644 index 000000000..bc6989310 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +const ( + attrHTTPMethod = "http.method" + attrHTTPURL = "http.url" + attrHTTPUserAgent = "http.user_agent" + attrHTTPStatusCode = "http.status_code" + + attrAZClientReqID = "az.client_request_id" + attrAZServiceReqID = "az.service_request_id" + + attrNetPeerName = "net.peer.name" +) + +// newHTTPTracePolicy creates a new instance of the httpTracePolicy. +// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace +func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { + return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} +} + +// httpTracePolicy is a policy that creates a trace for the HTTP request and its response +type httpTracePolicy struct { + allowedQP map[string]struct{} +} + +// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. +func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + var urlErr *url.Error + if errors.As(err, &urlErr) { + // calling *url.Error.Error() will include the unsanitized URL + // which we don't want. 
in addition, we already have the HTTP verb + // and sanitized URL in the trace so we aren't losing any info + err = urlErr.Err + } + span.SetStatus(tracing.SpanStatusError, err.Error()) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // Attributes contains key-value pairs of attributes for the span. + Attributes []tracing.Attribute +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method") +// - tracer is the client's Tracer for creating spans +// - options contains optional values. pass nil to accept any default values +func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { + if !tracer.Enabled() { + return ctx, func(err error) {} + } + + // we MUST propagate the active tracer before returning so that the trace policy can access it + ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) + + const newSpanKind = tracing.SpanKindInternal + if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { + // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), + // then the span for Bar() must be suppressed. however, if Bar() makes a REST + // call, then Bar's HTTP span must be a child of Foo's span. + // however, there is an exception to this rule. if the SDK method Foo() is a + // messaging producer/consumer, and it takes a callback that's a SDK method + // Bar(), then the span for Bar() must _not_ be suppressed. + if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal { + return ctx, func(err error) {} + } + } + + if options == nil { + options = &StartSpanOptions{} + } + + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ + Kind: newSpanKind, + Attributes: options.Attributes, + }) + ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind) + return ctx, func(err error) { + if err != nil { + errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) + span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error())) + } + span.End() + } +} + +// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress. +type ctxActiveSpan struct{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 000000000..bb00f6c2f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,35 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
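For context, a sketch (not part of the vendored patch) of the span-per-operation pattern that StartSpan enables in generated clients: the method's span ends with its error, and the HTTP span created by the trace policy becomes its child. The Client type and DeleteWidget method are hypothetical.

```go
package widgets

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

// Client is a hypothetical client; only the fields used here are shown.
type Client struct {
	endpoint string
	pl       runtime.Pipeline
	tracer   tracing.Tracer
}

// DeleteWidget wraps the operation in a span. StartSpan is a no-op when the
// client was not configured with a tracing provider (zero-value Tracer).
func (c *Client) DeleteWidget(ctx context.Context, id string) (err error) {
	ctx, endSpan := runtime.StartSpan(ctx, "Client.DeleteWidget", c.tracer, nil)
	defer func() { endSpan(err) }()

	req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(c.endpoint, "widgets", id))
	if err != nil {
		return err
	}
	resp, err := c.pl.Do(req)
	if err != nil {
		return err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) {
		return runtime.NewResponseError(resp)
	}
	return nil
}
```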
+ +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +// Deprecated: use [policy.WithCaptureResponse] instead. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return policy.WithCaptureResponse(parent, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go new file mode 100644 index 000000000..eeb1c09cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. +type KeyCredentialPolicy struct { + cred *exported.KeyCredential + header string + prefix string + allowHTTP bool +} + +// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. +type KeyCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. + Prefix string +} + +// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy]. +// - cred is the [azcore.KeyCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the key is placed +// - options contains optional configuration, pass nil to accept the default values +func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy { + if options == nil { + options = &KeyCredentialPolicyOptions{} + } + return &KeyCredentialPolicy{ + cred: cred, + header: header, + prefix: options.Prefix, + allowHTTP: options.InsecureAllowCredentialWithHTTP, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. +func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no KeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+ if k.cred != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { + return nil, err + } + val := exported.KeyCredentialGet(k.cred) + if k.prefix != "" { + val = k.prefix + val + } + req.Raw().Header.Add(k.header, val) + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 000000000..f048d7fb5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,264 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. 
+ var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. + tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. 
+func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + // don't use Get() as it will canonicalize k which might cause a mismatch + value := header[k][0] + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. +func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 000000000..360a7f211 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
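For context, a sketch (not part of the vendored patch) of how a consumer switches on the log policy's output and extends its allow-lists; header and query-parameter values outside the allow-lists are written as REDACTED. The header and query-parameter names below are hypothetical placeholders.

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	// route azcore's request/response log events somewhere visible
	azlog.SetListener(func(event azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", event, msg)
	})
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)

	// extend the default allow-lists and opt in to body logging
	opts := policy.ClientOptions{
		Logging: policy.LogOptions{
			AllowedHeaders:     []string{"x-contoso-correlation"},
			AllowedQueryParams: []string{"view"},
			IncludeBody:        true,
		},
	}
	_ = opts // pass to a client constructor that accepts *policy.ClientOptions
}
```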
+ +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 000000000..e15eea824 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,256 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + delay := time.Duration((1<<try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options.
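For context, a sketch (not part of the vendored patch) of overriding these retry defaults (3 retries, 800ms base delay, 60s cap) for a single call by attaching RetryOptions to the context; the function name and values are hypothetical.

```go
package example

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// fetchWithAggressiveRetries shows a per-call override; requests issued under
// this context use these values instead of the pipeline's defaults.
func fetchWithAggressiveRetries(ctx context.Context) error {
	ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{
		MaxRetries:    5,
		RetryDelay:    2 * time.Second,
		MaxRetryDelay: 30 * time.Second,
	})
	_ = ctx // pass ctx to any client method, e.g. client.Get(ctx, ...)
	return nil
}
```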
+func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs + log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil))) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. + err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + clone := req.Clone(req.Raw().Context()) + resp, err = clone.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. 
+ log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +// Deprecated: use [policy.WithRetryOptions] instead. +func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return policy.WithRetryOptions(parent, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go new file mode 100644 index 000000000..3964beea8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. 
+type SASCredentialPolicy struct { + cred *exported.SASCredential + header string + allowHTTP bool +} + +// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. +type SASCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool +} + +// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. +// - cred is the [azcore.SASCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the shared access signature is placed +// - options contains optional configuration, pass nil to accept the default values +func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + if options == nil { + options = &SASCredentialPolicyOptions{} + } + return &SASCredentialPolicy{ + cred: cred, + header: header, + allowHTTP: options.InsecureAllowCredentialWithHTTP, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. +func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SASCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if k.cred != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { + return nil, err + } + req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 000000000..80a903546 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [<application_id> ]azsdk-go-<mod>/<ver> <platform_info>. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + // mod might be the fully qualified name.
in that case, we just want the package name + if i := strings.LastIndex(mod, "/"); i > -1 { + mod = mod[i+1:] + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 000000000..03f76c9aa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,389 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. 
+ // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + tracer: options.Tracer, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + tracer: options.Tracer, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]any + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + tracer: options.Tracer, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + tracer tracing.Tracer + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + err = errors.New("polling frequency minimum is one second") + return + } + + start := time.Now() + logPollUntilDoneExit := func(v any) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err = shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + var resp *http.Response + resp, err = p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return + } + if p.Done() { + logPollUntilDoneExit("succeeded") + res, err = p.Result(ctx) + return + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + resp = p.resp + return + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) + if err != nil { + return + } + p.resp = resp + return +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. 
+func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { + if !p.Done() { + err = errors.New("poller is in a non-terminal state") + return + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + err = p.err + return + } + res = *p.result + return + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + err = p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } + // the LRO failed. record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return + } + p.done = true + if p.err != nil { + err = p.err + return + } + res = *p.result + return +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} + +// extracts the type name from the string returned from reflect.Value.Name() +func shortenTypeName(s string) string { + // the value is formatted as follows + // Poller[module/Package.Type].Method + // we want to shorten the generic type parameter string to Type + // anything we don't recognize will be left as-is + begin := strings.Index(s, "[") + end := strings.Index(s, "]") + if begin == -1 || end == -1 { + return s + } + + typeName := s[begin+1 : end] + if i := strings.LastIndex(typeName, "."); i > -1 { + typeName = typeName[i+1:] + } + return s[:begin+1] + typeName + s[end:] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 000000000..7d34b7803 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,281 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "path" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding = exported.Base64Encoding + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = exported.Base64StdFormat + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. 
+ Base64URLFormat Base64Encoding = exported.Base64URLFormat +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + +// EncodeQueryParams will parse and encode any query parameters in the specified URL. +// Any semicolons will automatically be escaped. +func EncodeQueryParams(u string) (string, error) { + before, after, found := strings.Cut(u, "?") + if !found { + return u, nil + } + // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. + // so, we must escape them first. note that this assumes that semicolons aren't + // being used as query param separators which is per the current RFC. + // for more info: + // https://github.com/golang/go/issues/25192 + // https://github.com/golang/go/issues/50034 + qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) + if err != nil { + return "", err + } + return before + "?" + qp.Encode(), nil +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. +func EncodeByteArray(v []byte, format Base64Encoding) string { + return exported.EncodeByteArray(v, format) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v any) error { + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. 
+func MarshalAsXML(req *policy.Request, v any) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value. +// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent]. +// Byte slices will be treated as JSON. All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]any) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + + writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error { + if mpc.Body == nil { + return errors.New("streaming.MultipartContent.Body cannot be nil") + } + + // use fieldname for the file name when unspecified + filename := fieldname + + if mpc.ContentType == "" && mpc.Filename == "" { + return writeContent(fieldname, filename, mpc.Body) + } + if mpc.Filename != "" { + filename = mpc.Filename + } + // this is pretty much copied from multipart.Writer.CreateFormFile + // but lets us set the caller provided Content-Type and filename + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename))) + contentType := "application/octet-stream" + if mpc.ContentType != "" { + contentType = mpc.ContentType + } + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, mpc.Body); err != nil { + return err + } + return nil + } + + // the same as multipart.Writer.WriteField but lets us specify the Content-Type + writeField := func(fieldname, contentType string, value string) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname))) + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + if _, err = fd.Write([]byte(value)); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } else if mpc, ok := v.(streaming.MultipartContent); ok { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + continue + } else if mpcs, ok := v.([]streaming.MultipartContent); ok { + for _, mpc := range mpcs { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + } + continue + } + + var content string + contentType := shared.ContentTypeTextPlain + switch tt := v.(type) { + case []byte: + // JSON, don't quote it + content = string(tt) + contentType = 
shared.ContentTypeAppJSON + case string: + content = tt + default: + // ensure the value is in string format + content = fmt.Sprintf("%v", v) + } + + if err := writeField(k, contentType, content); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey + +// NewUUID returns a new UUID using the RFC4122 algorithm. +func NewUUID() (string, error) { + u, err := uuid.New() + if err != nil { + return "", err + } + return u.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 000000000..048566e02 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp, nil) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. +func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v any) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v any) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. 
+func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + _, err := exported.Payload(resp, &exported.PayloadOptions{ + BytesModifier: func(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + }, + }) + if err != nil { + return err + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + return azexported.DecodeByteArray(s, v, format) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go new file mode 100644 index 000000000..1c75d771f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go @@ -0,0 +1,15 @@ +//go:build !wasm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return dialer.DialContext +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go new file mode 100644 index 000000000..3dc9eeecd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go @@ -0,0 +1,15 @@ +//go:build (js && wasm) || wasip1 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 000000000..2124c1d48 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "golang.org/x/net/http2" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: defaultTransportDialContext(&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }), + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateFreelyAsClient, + }, + } + // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed + if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { + // if the connection has been idle for 10 seconds, send a ping frame for a health check + http2Transport.ReadIdleTimeout = 10 * time.Second + // if there's no response to the ping within the timeout, the connection will be closed + http2Transport.PingTimeout = 5 * time.Second + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 000000000..cadaef3d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 000000000..2468540bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. 
+func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. +func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (p *progress) Close() error { + return p.rc.Close() +} + +// MultipartContent contains streaming content used in multipart/form payloads. +type MultipartContent struct { + // Body contains the required content body. + Body io.ReadSeekCloser + + // ContentType optionally specifies the HTTP Content-Type for this Body. + // The default value is application/octet-stream. + ContentType string + + // Filename optionally specifies the filename for this Body. + // The default value is the field name for the multipart/form section. + Filename string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 000000000..faa98c9dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 000000000..e0e4817b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 000000000..80282d4ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. 
+ SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 000000000..1ade7c560 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,191 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified module name and version. +// - module - the fully qualified name of the module +// - version - the version of the module +func (p Provider) NewTracer(module, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(module, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) Span +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + if options == nil { + options = &TracerOptions{} + } + return Tracer{ + newSpanFn: newSpanFn, + spanFromContextFn: options.SpanFromContext, + } +} + +// Tracer is the factory that creates Span instances. 
+type Tracer struct { + attrs []Attribute + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + spanFromContextFn func(ctx context.Context) Span +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + opts := SpanOptions{} + if options != nil { + opts = *options + } + opts.Attributes = append(opts.Attributes, t.attrs...) + return t.newSpanFn(ctx, spanName, &opts) + } + return ctx, Span{} +} + +// SetAttributes sets attrs to be applied to each Span. If a key from attrs +// already exists for an attribute of the Span it will be overwritten with +// the value contained in attrs. +func (t *Tracer) SetAttributes(attrs ...Attribute) { + t.attrs = append(t.attrs, attrs...) +} + +// Enabled returns true if this Tracer is capable of creating Spans. +func (t Tracer) Enabled() bool { + return t.newSpanFn != nil +} + +// SpanFromContext returns the Span associated with the current context. +// If the provided context has no Span, false is returned. +func (t Tracer) SpanFromContext(ctx context.Context) Span { + if t.spanFromContextFn != nil { + return t.spanFromContextFn(ctx) + } + return Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. 
+func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). + Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore new file mode 100644 index 000000000..8cdb91036 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore @@ -0,0 +1,4 @@ +# live test artifacts +Dockerfile +k8s.yaml +sshkey* diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 000000000..a8c2feb6d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,575 @@ +# Release History + +## 1.7.0 (2024-06-20) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipelines service connection with + workload identity federation + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Removed the persistent token caching API. It will return in v1.8.0-beta.1 + +## 1.7.0-beta.1 (2024-06-10) + +### Features Added +* Restored `AzurePipelinesCredential` and persistent token caching API + +## Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Values which `NewAzurePipelinesCredential` read from environment variables in + prior versions are now parameters +* Renamed `AzurePipelinesServiceConnectionCredentialOptions` to `AzurePipelinesCredentialOptions` + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0 (2024-06-10) + +### Features Added +* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential + that authenticates with client assertions such as federated credentials + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Removed `AzurePipelinesCredential` and the persistent token caching API. + They will return in v1.7.0-beta.1 + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0-beta.4 (2024-05-14) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with + workload identity federation + +## 1.6.0-beta.3 (2024-04-09) + +### Breaking Changes +* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity + environments to avoid excessive retry delays when the IMDS endpoint is not available. This + should improve credential chain resolution for local development scenarios. 
+ +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + +## 1.5.2 (2024-04-09) + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + +### Other Changes +* Restored v1.4.0 error behavior for empty tenant IDs +* Upgraded dependencies + +## 1.6.0-beta.2 (2024-02-06) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.1 +* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct + type that carries the `TokenRequestOptions` passed to the `GetToken` call which + returned the error. + +### Bugs Fixed +* Fixed more cases in which credential chains like `DefaultAzureCredential` + should try their next credential after attempting managed identity + authentication in a Docker Desktop container + +### Other Changes +* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration + +## 1.6.0-beta.1 (2024-01-17) + +### Features Added +* Restored persistent token caching API first added in v1.5.0-beta.1 +* Added `AzureCLICredentialOptions.Subscription` + +## 1.5.1 (2024-01-17) + +### Bugs Fixed +* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly + +## 1.5.0 (2024-01-16) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +* Removed persistent token caching. It will return in v1.6.0-beta.1 + +### Bugs Fixed +* Credentials now preserve MSAL headers e.g. X-Client-Sku + +### Other Changes +* Upgraded dependencies + +## 1.5.0-beta.2 (2023-11-07) + +### Features Added +* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity +* Added spans for distributed tracing. + +## 1.5.0-beta.1 (2023-10-10) + +### Features Added +* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions` + on a credential's options to enable and configure this. See the package documentation for + this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more + details. +* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This + credential is also part of the `DefaultAzureCredential` authentication flow. + +## 1.4.0 (2023-10-10) + +### Bugs Fixed +* `ManagedIdentityCredential` will now retry when IMDS responds 410 or 503 + +## 1.4.0-beta.5 (2023-09-12) + +### Features Added +* Service principal credentials can request CAE tokens + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.4.0-beta.4 +* Whether `GetToken` requests a CAE token is now determined by `TokenRequestOptions.EnableCAE`. Azure + SDK clients which support CAE will set this option automatically. Credentials no longer request CAE + tokens by default or observe the environment variable "AZURE_IDENTITY_DISABLE_CP1". 
+ +### Bugs Fixed +* Credential chains such as `DefaultAzureCredential` now try their next credential, if any, when + managed identity authentication fails in a Docker Desktop container + ([#21417](https://github.com/Azure/azure-sdk-for-go/issues/21417)) + +## 1.4.0-beta.4 (2023-08-16) + +### Other Changes +* Upgraded dependencies + +## 1.3.1 (2023-08-16) + +### Other Changes +* Upgraded dependencies + +## 1.4.0-beta.3 (2023-08-08) + +### Bugs Fixed +* One invocation of `AzureCLICredential.GetToken()` and `OnBehalfOfCredential.GetToken()` + can no longer make two authentication attempts + +## 1.4.0-beta.2 (2023-07-14) + +### Other Changes +* `DefaultAzureCredentialOptions.TenantID` applies to workload identity authentication +* Upgraded dependencies + +## 1.4.0-beta.1 (2023-06-06) + +### Other Changes +* Re-enabled CAE support as in v1.3.0-beta.3 + +## 1.3.0 (2023-05-09) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.5 +* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` +* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` + +### Other Changes +* Upgraded to MSAL v1.0.0 + +## 1.3.0-beta.5 (2023-04-11) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.3.0-beta.4 +* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`. + The constructor now reads default configuration from environment variables set by the Azure + workload identity webhook by default. + ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478)) +* Removed CAE support. It will return in v1.4.0-beta.1 + ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479)) + +### Bugs Fixed +* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances. + +## 1.3.0-beta.4 (2023-03-08) + +### Features Added +* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery` + +### Bugs Fixed +* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines + ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044)) + +### Other Changes +* Upgraded dependencies + +## 1.2.2 (2023-03-07) + +### Other Changes +* Upgraded dependencies + +## 1.3.0-beta.3 (2023-02-07) + +### Features Added +* By default, credentials set client capability "CP1" to enable support for + [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation). + This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. + You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". +* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login + prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) +* Service principal and user credentials support ADFS authentication on Azure Stack. + Specify "adfs" as the credential's tenant. +* Applications running in private or disconnected clouds can prevent credentials from + requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery` + field on credential options. +* Many credentials can now be configured to authenticate in multiple tenants. 
The + options types for these credentials have an `AdditionallyAllowedTenants` field + that specifies additional tenants in which the credential may authenticate. + +## 1.3.0-beta.2 (2023-01-10) + +### Features Added +* Added `OnBehalfOfCredential` to support the on-behalf-of flow + ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642)) + +### Bugs Fixed +* `AzureCLICredential` reports token expiration in local time (should be UTC) + +### Other Changes +* `AzureCLICredential` imposes its default timeout only when the `Context` + passed to `GetToken()` has no deadline +* Added `NewCredentialUnavailableError()`. This function constructs an error indicating + a credential can't authenticate and an encompassing `ChainedTokenCredential` should + try its next credential, if any. + +## 1.3.0-beta.1 (2022-12-13) + +### Features Added +* `WorkloadIdentityCredential` and `DefaultAzureCredential` support + Workload Identity Federation on Kubernetes. `DefaultAzureCredential` + support requires environment variable configuration as set by the + Workload Identity webhook. + ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615)) + +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. + ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. +* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. 
Credentials are now configured for sovereign or private + clouds with the API in `azcore/cloud`, for example: + ```go + // before + opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment} + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + + // after + import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + + opts := azidentity.ClientSecretCredentialOptions{} + opts.Cloud = cloud.AzureGovernment + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + ``` + +## 0.13.2 (2022-03-08) + +### Bugs Fixed +* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential` + ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144)) + +### Other Changes +* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01 + ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086)) + +## 0.13.1 (2022-02-08) + +### Features Added +* `EnvironmentCredential` supports certificate SNI authentication when + `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true". + ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851)) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken()` now returns an error when configured for + a user assigned identity in Azure Cloud Shell (which doesn't support such identities) + ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946)) + +### Other Changes +* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the + error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token + from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923)) + +## 0.13.0 (2022-01-11) + +### Breaking Changes +* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name +* Unexported `CredentialUnavailableError` +* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`. + * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called. + * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential. + * `DefaultAzureCredenial` will also re-use the first successful credential on subsequent calls to `GetToken`. + * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential. + +### Other Changes +* `ManagedIdentityCredential` no longer probes IMDS before requesting a token + from it. Also, an error response from IMDS no longer disables a credential + instance. Following an error, a credential instance will continue to send + requests to IMDS as necessary. +* Adopted MSAL for user and service principal authentication +* Updated `azcore` requirement to 0.21.0 + +## 0.12.0 (2021-11-02) +### Breaking Changes +* Raised minimum go version to 1.16 +* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's + `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy. 
+* The `AuthorityHost` field in credential options structs is now a custom type, + `AuthorityHost`, with underlying type `string` +* `NewChainedTokenCredential` has a new signature to accommodate a placeholder + options struct: + ```go + // before + cred, err := NewChainedTokenCredential(credA, credB) + + // after + cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil) + ``` +* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential` + from `DefaultAzureCredentialOptions` +* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of + a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases: + ```go + // before + cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil) + + // after + certData, err := os.ReadFile("/cert.pem") + certs, key, err := ParseCertificates(certData, password) + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil) + ``` +* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port` +* Removed `AADAuthenticationFailedError` +* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now + specified by `ManagedIdentityCredentialOptions.ID`: + ```go + // before + cred, err := NewManagedIdentityCredential("client-id", nil) + // or, for a resource ID + opts := &ManagedIdentityCredentialOptions{ID: ResourceID} + cred, err := NewManagedIdentityCredential("/subscriptions/...", opts) + + // after + clientID := ClientID("7cf7db0d-...") + opts := &ManagedIdentityCredentialOptions{ID: clientID} + // or, for a resource ID + resID: ResourceID("/subscriptions/...") + opts := &ManagedIdentityCredentialOptions{ID: resID} + cred, err := NewManagedIdentityCredential(opts) + ``` +* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error` +* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization + syntax, this change renames `HTTPClient` fields to `Transport`. +* Renamed `LogCredential` to `EventCredential` +* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH` +* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and + `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead. +* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two + interfaces having the same names. 
+ +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 000000000..4404be824 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. 
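+
+The examples in this guide call a `handle(err)` helper. It isn't part of either library; it stands in for whatever error handling your application does. A minimal placeholder, assuming errors should simply be fatal, might look like:
+
+```go
+import "log"
+
+// handle is a stand-in for real error handling in the snippets below.
+func handle(err error) {
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```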
+ +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina} + +cred, err := azidentity.NewClientSecretCredential( + tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts}, +) +handle(err) +``` + +## Client secret authentication + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Client certificate authentication + +### `autorest/adal` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +handle(err) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/", +) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certs, key, err := azidentity.ParseCertificates(certData, nil) +handle(err) + +cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Managed identity + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewManagedIdentityCredential(nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +### User-assigned identities + +`autorest/adal`: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +opts := &adal.ManagedIdentityOptions{ClientID: "..."} +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/") +handle(err) +``` + +`azidentity`: + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + +opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")} +cred, err := azidentity.NewManagedIdentityCredential(&opts) +handle(err) +``` + +## Device code authentication + +### `autorest/adal` + +```go +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthClient := &http.Client{} +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +resource := "https://management.azure.com/" +deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource) +handle(err) + +// display instructions, wait for the user to authenticate +fmt.Println(*deviceCode.Message) +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewDeviceCodeCredential(nil) +handle(err) + +client, err := armsubscriptions.NewSubscriptionsClient(cred, nil) +handle(err) +``` + +`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential). 
+ +## Acquire a token + +### `autorest/adal` + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) + +err = spt.Refresh() +if err == nil { + token := spt.Token +} +``` + +### `azidentity` + +In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +tk, err := cred.GetToken( + context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}}, +) +if err == nil { + token := tk.Token +} +``` + +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 000000000..7e201ea2f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,258 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. 
+ +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +#### Authenticate via the Azure Developer CLI + +Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally. + +To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. +1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. 
+ +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. + +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. 
+ +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource +|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[AzurePipelinesCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzurePipelinesCredential)|Authenticate an Azure Pipelines [service connection](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI +|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI +|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. 
Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Token caching + +Token caching is an `azidentity` feature that allows apps to: + +* Cache tokens in memory (default) or on disk (opt-in). +* Improve resilience and performance. +* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. + +For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching). + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). 
+ +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD new file mode 100644 index 000000000..fbaa29220 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -0,0 +1,71 @@ +## Token caching in the Azure Identity client module + +*Token caching* is a feature provided by the Azure Identity library that allows apps to: + +- Improve their resilience and performance. +- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times the user is prompted to authenticate. + +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. + +Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. + +### In-memory token caching + +*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. + +**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. + +#### Caching cannot be disabled + +As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. 
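+
+For illustration, requests made through the same credential instance share its in-memory cache, while a new instance starts with an empty cache. A minimal sketch, using the standard `GetToken` and `policy.TokenRequestOptions` API (the cache itself is internal to the credential, so the exact behavior depends on token lifetime):
+
+```go
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+if err != nil {
+	// handle error
+}
+
+opts := policy.TokenRequestOptions{Scopes: []string{"https://management.azure.com/.default"}}
+
+// First request: the credential calls Microsoft Entra ID and caches the token in memory.
+tk, err := cred.GetToken(context.TODO(), opts)
+if err != nil {
+	// handle error
+}
+
+// Second request for the same scopes: normally served from the in-memory cache,
+// with no additional request to Microsoft Entra ID, until the token nears expiry.
+tk, err = cred.GetToken(context.TODO(), opts)
+if err != nil {
+	// handle error
+}
+_ = tk
+```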
+ +### Persistent token caching + +> Only azidentity v1.5.0-beta versions support persistent token caching + +*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. + +| Operating system | Storage mechanism | +|------------------|---------------------------------------| +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain | +| Windows | DPAPI | + +By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. +However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. + +With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: + +- Makes the app more resilient to failures. +- Ensures the app can continue to function during an Entra ID outage or disruption. +- Avoids having to prompt users to authenticate each time the process is restarted. + +>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. + +#### Example code + +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. + +### Credentials supporting token caching + +The following table indicates the state of in-memory and persistent caching in each credential type. + +**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). 
+ +| Credential | In-memory token caching | Persistent token caching | +|--------------------------------|---------------------------------------------------------------------|--------------------------| +| `AzureCLICredential` | Not Supported | Not Supported | +| `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `AzurePipelinesCredential` | Supported | Supported | +| `ClientAssertionCredential` | Supported | Supported | +| `ClientCertificateCredential` | Supported | Supported | +| `ClientSecretCredential` | Supported | Supported | +| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported | +| `DeviceCodeCredential` | Supported | Supported | +| `EnvironmentCredential` | Supported | Not Supported | +| `InteractiveBrowserCredential` | Supported | Supported | +| `ManagedIdentityCredential` | Supported | Not Supported | +| `OnBehalfOfCredential` | Supported | Supported | +| `UsernamePasswordCredential` | Supported | Supported | +| `WorkloadIdentityCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 000000000..54016a070 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,241 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) +- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any 
service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. + +## Find relevant information in errors + +Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. 
+```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
    | +|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.| + +## Troubleshoot EnvironmentCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + + +## Troubleshoot ClientSecretCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).| + + +## Troubleshoot ClientCertificateCredential authentication issues + +| Error Code | Description | Mitigation | +|---|---|---| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
+
+
+## Troubleshoot UsernamePasswordCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
+
+
+## Troubleshoot ManagedIdentityCredential authentication issues
+
+`ManagedIdentityCredential` is designed to work on a variety of Azure hosts that support managed identity. Configuration and troubleshooting vary from host to host. The following table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`.
+
+|Host Environment| | |
+|---|---|---|
+|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
+|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
+|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
+|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
+|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
+
+### Azure Virtual Machine managed identity
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + + +## Troubleshoot AzureCLICredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).
    • Validate the installation location is in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + + +## Troubleshoot AzureDeveloperCLICredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|
    • Ensure the Azure Developer CLI is properly installed. See the installation instructions at [Install or update the Azure Developer CLI](https://learn.microsoft.com/azure/developer/azure-developer-cli/install-azd).
    • Validate the installation location has been added to the `PATH` environment variable.
    | +|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|
    • Log in to the Azure Developer CLI using the `azd auth login` command.
    • Validate that the Azure Developer CLI can obtain tokens. For instructions, see [Verify the Azure Developer CLI can obtain tokens](#verify-the-azure-developer-cli-can-obtain-tokens).
    |
+
+#### Verify the Azure Developer CLI can obtain tokens
+
+You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI.
+
+```sh
+azd config list
+```
+
+Once you've verified the Azure Developer CLI is using the correct account, you can validate that it's able to obtain tokens for this account.
+
+```sh
+azd auth token --output json --scope https://management.core.windows.net/.default
+```
+> Note that the output of this command will contain a valid access token, and SHOULD NOT BE SHARED, to avoid compromising account security.
+
+
+## Troubleshoot `WorkloadIdentityCredential` authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` + + +## Troubleshoot AzurePipelinesCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.| +| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| +|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json new file mode 100644 index 000000000..bff0c44da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/azidentity", + "Tag": "go/azidentity_087379b475" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go new file mode 100644 index 000000000..ada4d6501 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +var supportedAuthRecordVersions = []string{"1.0"} + +// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication +// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. +type authenticationRecord struct { + // Authority is the URL of the authority that issued the token. + Authority string `json:"authority"` + + // ClientID is the ID of the application that authenticated the user. + ClientID string `json:"clientId"` + + // HomeAccountID uniquely identifies the account. 
+ HomeAccountID string `json:"homeAccountId"` + + // TenantID identifies the tenant in which the user authenticated. + TenantID string `json:"tenantId"` + + // Username is the user's preferred username. + Username string `json:"username"` + + // Version of the AuthenticationRecord. + Version string `json:"version"` +} + +// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord +func (a *authenticationRecord) UnmarshalJSON(b []byte) error { + // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we + // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally + // different type enables this by assigning all the fields without recursing into this method. + type r authenticationRecord + err := json.Unmarshal(b, (*r)(a)) + if err != nil { + return err + } + if a.Version == "" { + return errors.New("AuthenticationRecord must have a version") + } + for _, v := range supportedAuthRecordVersions { + if a.Version == v { + return nil + } + } + return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions) +} + +// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. +func (a *authenticationRecord) account() public.Account { + return public.Account{ + Environment: a.Authority, + HomeAccountID: a.HomeAccountID, + PreferredUsername: a.Username, + } +} + +func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { + u, err := url.Parse(ar.IDToken.Issuer) + if err != nil { + return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + } + tenant := ar.IDToken.TenantID + if tenant == "" { + tenant = strings.Trim(u.Path, "/") + } + username := ar.IDToken.PreferredUsername + if username == "" { + username = ar.IDToken.UPN + } + return authenticationRecord{ + Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), + ClientID: ar.IDToken.Audience, + HomeAccountID: ar.Account.HomeAccountID, + TenantID: tenant, + Username: username, + Version: "1.0", + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 000000000..b0965036b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS" + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + + traceNamespace = "Microsoft.Entra" + traceOpGetToken = "GetToken" + traceOpAuthenticate = "Authenticate" +) + +var ( + // capability CP1 indicates the client application is capable of handling CAE claims challenges + cp1 = []string{"CP1"} + errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") +) + +// tokenCachePersistenceOptions contains options for persistent token caching +type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } + } + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp +} + +// resolveTenant returns the correct tenant for a token request +func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { + if specified == "" || specified == defaultTenant { + return defaultTenant, nil + } + if defaultTenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(specified) { + return "", errInvalidTenantID + } + for _, t := range additionalTenants { + if t == "*" || t == specified { + return specified, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) +} + +func alphanumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') +} + +func validTenantID(tenantID string) bool { + if len(tenantID) < 1 { + return false + } + for _, r := range tenantID { + if !(alphanumeric(r) || r == '.' 
|| r == '-') { + return false + } + } + return true +} + +func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + + // copy headers to the new request, ignoring any for which the new request has a value + h := req.Raw().Header + for key, vals := range r.Header { + if _, has := h[key]; !has { + for _, val := range vals { + h.Add(key, val) + } + } + } + + resp, err := client.Pipeline().Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type msalConfidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) + AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type msalPublicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 000000000..b9976f5fe --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureCLI = "AzureCLICredential" + +type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other + // than the Azure CLI's current account. + Subscription string + + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking az + tokenProvider azTokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultAzTokenProvider + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + mu *sync.Mutex + opts AzureCLICredentialOptions +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + for _, r := range cp.Subscription { + if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription) + } + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + cp.init() + cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) + return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} + if len(opts.Scopes) != 1 { + return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + if !validScope(opts.Scopes[0]) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0]) + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription) + if err == nil { + at, err = c.createAccessToken(b) + } + if err != nil { + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err + } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) + return at, nil +} + +// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. +var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(scopes[0], defaultSuffix) + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + if subscription != "" { + // subscription needs quotes because it may contain spaces + commandLine += ` --subscription "` + subscription + `"` + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Expires_On int64 `json:"expires_on"` + ExpiresOn string `json:"expiresOn"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + exp := time.Unix(t.Expires_On, 0) + if t.Expires_On == 0 { + exp, err = time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("%s: error parsing token expiration time %q: %v", credNameAzureCLI, t.ExpiresOn, 
err) + } + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: exp.UTC(), + } + return converted, nil +} + +var _ azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go new file mode 100644 index 000000000..cbe7c4c2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + +type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) + +// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. +type AzureDeveloperCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, + // which is the tenant of the selected Azure subscription. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking azd + tokenProvider azdTokenProvider +} + +// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. +// +// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview +type AzureDeveloperCLICredential struct { + mu *sync.Mutex + opts AzureDeveloperCLICredentialOptions +} + +// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options. +func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) { + cp := AzureDeveloperCLICredentialOptions{} + if options != nil { + cp = *options + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + if cp.tokenProvider == nil { + cp.tokenProvider = defaultAzdTokenProvider + } + return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} + if len(opts.Scopes) == 0 { + return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope") + } + for _, scope := range opts.Scopes { + if !validScope(scope) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope) + } + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant) + if err == nil { + at, err = c.createAccessToken(b) + } + if err != nil { + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err + } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) + return at, nil +} + +// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. +var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) { + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "azd auth token -o json" + if tenant != "" { + commandLine += " --tenant-id " + tenant + } + for _, scope := range scopes { + commandLine += " --scope " + scope + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") { + msg = "Azure Developer CLI not found on path" + } else if strings.Contains(msg, "azd auth login") { + msg = `please run "azd auth login" from a command prompt to authenticate before using this credential` + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) + } + return output, nil +} + +func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"token"` + ExpiresOn string `json:"expiresOn"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + return azcore.AccessToken{ + ExpiresOn: exp.UTC(), + Token: t.AccessToken, + }, nil +} + +var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil) diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go new file mode 100644 index 000000000..80c1806bb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const ( + credNameAzurePipelines = "AzurePipelinesCredential" + oidcAPIVersion = "7.1" + systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" +) + +// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See +// [Azure Pipelines documentation] for more information. +// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation +type AzurePipelinesCredential struct { + connectionID, oidcURI, systemAccessToken string + cred *ClientAssertionCredential +} + +// AzurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential. +type AzurePipelinesCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewAzurePipelinesCredential is the constructor for AzurePipelinesCredential. +// +// - tenantID: tenant ID of the service principal federated with the service connection +// - clientID: client ID of that service principal +// - serviceConnectionID: ID of the service connection to authenticate +// - systemAccessToken: security token for the running build. See [Azure Pipelines documentation] for +// an example showing how to get this value. +// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken +func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, systemAccessToken string, options *AzurePipelinesCredentialOptions) (*AzurePipelinesCredential, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + if clientID == "" { + return nil, errors.New("no client ID specified") + } + if serviceConnectionID == "" { + return nil, errors.New("no service connection ID specified") + } + if systemAccessToken == "" { + return nil, errors.New("no system access token specified") + } + u := os.Getenv(systemOIDCRequestURI) + if u == "" { + return nil, fmt.Errorf("no value for environment variable %s. 
This should be set by Azure Pipelines", systemOIDCRequestURI) + } + a := AzurePipelinesCredential{ + connectionID: serviceConnectionID, + oidcURI: u, + systemAccessToken: systemAccessToken, + } + if options == nil { + options = &AzurePipelinesCredentialOptions{} + } + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, a.getAssertion, &caco) + if err != nil { + return nil, err + } + cred.client.name = credNameAzurePipelines + a.cred = cred + return &a, nil +} + +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. +func (a *AzurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := a.cred.GetToken(ctx, opts) + return tk, err +} + +func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, error) { + url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID + url, err := runtime.EncodeQueryParams(url) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + } + req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + res, err := doForClient(a.cred.client.azClient, req) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + } + if res.StatusCode != http.StatusOK { + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + // include the response because its body, if any, probably contains an error message. + // OK responses aren't included with errors because they probably contain secrets + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + } + b, err := runtime.Payload(res) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + } + var r struct { + OIDCToken string `json:"oidcToken"` + } + err = json.Unmarshal(b, &r) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + } + return r.OIDCToken, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 000000000..6c35a941b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. 
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var ( + err error + errs []error + successfulCredential azcore.TokenCredential + token azcore.AccessToken + unavailableErr credentialUnavailable + ) + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + // continue to the next source iff this one returned credentialUnavailableError + if !errors.As(err, &unavailableErr) { + break + } + } + if c.iterating { + c.cond.L.Lock() + // this is nil when all credentials returned an error + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res, err) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 000000000..4cd8c5144 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,46 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + CloudConfig: + Public: + SubscriptionConfigurations: + - $(sub-config-azure-cloud-test-resources) + - $(sub-config-identity-test-resources) + EnvVars: + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + RunLiveTests: true + ServiceDirectory: azidentity + UsePipelineProxy: false + + ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + MatrixConfigs: + - Name: managed_identity_matrix + GenerateVMJobs: true + Path: sdk/azidentity/managed-identity-matrix.json + Selection: sparse + MatrixReplace: + - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi + - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 000000000..b588750ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,85 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Microsoft Entra ID documentation] for details of the assertion format. +// +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client *confidentialClient +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. 
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. +func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + } + c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) + if err != nil { + return nil, err + } + return &ClientAssertionCredential{client: c}, nil +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 000000000..80cd96b56 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,174 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client *confidentialClient +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. See +// [ParseCertificates] for help loading a certificate. +func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + } + c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) + if err != nil { + return nil, err + } + return &ClientCertificateCredential{client: c}, nil +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS#12 format, for use with [NewClientCertificateCredential]. +// Pass nil for password if the private key isn't encrypted. This function has limitations, for example it can't decrypt keys in +// PEM format or PKCS#12 certificates that use SHA256 for message authentication. If you encounter such limitations, consider +// using another module to load the certificate and private key. 
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 000000000..9e6772e9b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. 
It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client *confidentialClient +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. +func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + } + c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) + if err != nil { + return nil, err + } + return &ClientSecretCredential{client: c}, nil +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go new file mode 100644 index 000000000..3bd08c685 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -0,0 +1,184 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +type confidentialClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + // Assertion for on-behalf-of authentication + Assertion string + DisableInstanceDiscovery, SendX5C bool + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// confidentialClient wraps the MSAL confidential client +type confidentialClient struct { + cae, noCAE msalConfidentialClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + cred confidential.Credential + host string + name string + opts confidentialClientOptions + region string + azClient *azcore.Client +} + +func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(opts.Cloud) + if err != nil { + return nil, err + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &opts.ClientOptions) + if err != nil { + return nil, err + } + opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) + return &confidentialClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + cred: cred, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: opts, + region: os.Getenv(azureRegionalAuthorityName), + tenantID: tenantID, + azClient: client, + }, nil +} + +// GetToken requests an access token from MSAL, checking the cache first. +func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", c.name) + } + // we don't resolve the tenant for managed identities because they acquire tokens only from their home tenants + if c.name != credNameManagedIdentity { + tenant, err := c.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + tro.TenantID = tenant + } + client, mu, err := c.client(tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + var ar confidential.AuthResult + if c.opts.Assertion != "" { + ar, err = client.AcquireTokenOnBehalfOf(ctx, c.opts.Assertion, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } else { + ar, err = client.AcquireTokenSilent(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + if err != nil { + ar, err = client.AcquireTokenByCredential(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } + } + if err != nil { + // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. + // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. 
+ var unavailableErr credentialUnavailable + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, err.Error(), res, err) + } + } else { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { + c.clientMu.Lock() + defer c.clientMu.Unlock() + if tro.EnableCAE { + if c.cae == nil { + client, err := c.newMSALClient(true) + if err != nil { + return nil, nil, err + } + c.cae = client + } + return c.cae, c.caeMu, nil + } + if c.noCAE == nil { + client, err := c.newMSALClient(false) + if err != nil { + return nil, nil, err + } + c.noCAE = client + } + return c.noCAE, c.noCAEMu, nil +} + +func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } + authority := runtime.JoinPaths(c.host, c.tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(c.region), + confidential.WithCache(cache), + confidential.WithHTTPClient(c), + } + if enableCAE { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + if c.opts.SendX5C { + o = append(o, confidential.WithX5C()) + } + if c.opts.DisableInstanceDiscovery || strings.ToLower(c.tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, c.clientID, c.cred, o...) +} + +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (c *confidentialClient) resolveTenant(specified string) (string, error) { + return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (c *confidentialClient) CloseIdleConnections() { + // do nothing +} + +func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 000000000..551d31994 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,165 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + // ClientOptions has additional options for credentials that use an Azure SDK HTTP pipeline. These options don't apply + // to credential types that authenticate via external tools such as the Azure CLI. 
+ azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add + // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be + // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping +// when one provides a token: +// +// - [EnvironmentCredential] +// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload +// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing +// more control over its configuration. +// - [ManagedIdentityCredential] +// - [AzureCLICredential] +// - [AzureDeveloperCLICredential] +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
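+//
+// A minimal usage sketch (the management-plane scope below is only an example; any
+// resource scope accepted by Microsoft Entra ID works the same way):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	// Azure SDK clients accept cred directly; a token can also be requested explicitly:
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://management.azure.com/.default"},
+//	})
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	_ = tk // tk.Token holds the access token, tk.ExpiresOn its expiry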
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + additionalTenants := options.AdditionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, + }) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, wic) + } else { + errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) + } + + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + miCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, miCred) + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, azdCred) + } else { + errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err}) + } + + if len(errorMessages) > 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(credentialUnavailable); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go new file mode 100644 index 000000000..be963d3a2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "errors" + "time" +) + +// cliTimeout is the default timeout for authentication attempts via CLI tools +const cliTimeout = 10 * time.Second + +// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try +// the next credential in its chain (another developer credential). +func unavailableIfInChain(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain { + var unavailableErr credentialUnavailable + if !errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) + } + } + return err +} + +// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials. +func validScope(scope string) bool { + for _, r := range scope { + if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 000000000..cd30bedd5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,138 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. 
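+//
+// An illustrative sketch of overriding UserPrompt, which by default prints the
+// sign-in instructions to stdout:
+//
+//	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
+//		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
+//			// route the sign-in instructions somewhere other than stdout
+//			_, err := fmt.Fprintln(os.Stderr, dc.Message)
+//			return err
+//		},
+//	})
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	_ = cred // pass cred to an Azure SDK client, or call cred.GetToken directly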
+type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // to acquire a token. + disableAutomaticAuthentication bool + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions + + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Microsoft Entra ID. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it +// automatically opens a browser to the login page. +type DeviceCodeCredential struct { + client *publicClient +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
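+//
+// A minimal sketch of the flow (GetToken blocks until the user completes authentication;
+// the Microsoft Graph scope below is only an example):
+//
+//	cred, err := azidentity.NewDeviceCodeCredential(nil)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://graph.microsoft.com/.default"},
+//	})
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	_ = tk // tk.Token is an access token for the requested scope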
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + msalOpts := publicClientOptions{ + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + Record: cp.authenticationRecord, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) + if err != nil { + return nil, err + } + c.name = credNameDeviceCode + return &DeviceCodeCredential{client: c}, nil +} + +// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 000000000..b30f5474f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,167 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit + // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. 
Applications using EnvironmentCredential + // directly should set that variable instead. This field should remain unexported to preserve this credential's + // unambiguous "all configuration from environment variables" design. + additionallyAllowedTenants []string +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this +// function isn't able to parse your certificate, use [ClientCertificateCredential] instead. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +// +// # Configuration for multitenant applications +// +// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants +// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set +// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. 
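+//
+// A minimal sketch, assuming service principal configuration is present in the
+// environment (for example AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET):
+//
+//	cred, err := azidentity.NewEnvironmentCredential(nil)
+//	if err != nil {
+//		// missing or incomplete variables are reported here, before any token request
+//	}
+//	_ = cred // pass cred to an Azure SDK client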
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS + additionalTenants := options.additionallyAllowedTenants + if len(additionalTenants) == 0 { + if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" { + additionalTenants = strings.Split(tenants, ";") + } + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf("failed to parse %q due to error %q. This may be due to a limitation of this module's certificate loader. Consider calling NewClientCertificateCredential instead", certPath, err.Error()) + } + o := &ClientCertificateCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 000000000..35fa01d13 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,170 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string + err error +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s authentication failed. 
%s\n", e.credType, e.message) + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + // this happens when the response is created from a custom HTTP transporter, + // which doesn't guarantee to bind the original request to the response + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := runtime.Payload(e.RawResponse) + switch { + case err != nil: + fmt.Fprintf(msg, "Error reading response body: %v", err) + case len(body) > 0: + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + default: + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameAzureDeveloperCLI: + anchor = "azd" + case credNameAzurePipelines: + anchor = "apc" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + case credNameWorkloadIdentity: + anchor = "workload" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// because the credential requires user interaction and is configured not to request it automatically. +type authenticationRequiredError struct { + credentialUnavailableError + + // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. + TokenRequestOptions policy.TokenRequestOptions +} + +func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &authenticationRequiredError{ + credentialUnavailableError: credentialUnavailableError{ + credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively", + }, + TokenRequestOptions: tro, + } +} + +var ( + _ credentialUnavailable = (*authenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) +) + +type credentialUnavailable interface { + error + credentialUnavailable() +} + +type credentialUnavailableError struct { + message string +} + +// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting +func newCredentialUnavailableError(credType, message string) error { + msg := fmt.Sprintf("%s: %s", credType, message) + return &credentialUnavailableError{msg} +} + +// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication +// because it lacks required data or state. 
When [ChainedTokenCredential] receives this error it will try +// its next credential, if any. +func NewCredentialUnavailableError(message string) error { + return &credentialUnavailableError{message} +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. +func (e *credentialUnavailableError) Error() string { + return e.message +} + +// NonRetriable is a marker method indicating this error should not be retried. It has no implementation. +func (*credentialUnavailableError) NonRetriable() {} + +func (*credentialUnavailableError) credentialUnavailable() {} + +var ( + _ credentialUnavailable = (*credentialUnavailableError)(nil) + _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work new file mode 100644 index 000000000..04ea962b4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . + ./cache +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum new file mode 100644 index 000000000..c592f283b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -0,0 +1,60 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 000000000..056785a8a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,118 @@ 
+//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const credNameBrowser = "InteractiveBrowserCredential" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. +type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire + // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // to acquire a token. + disableAutomaticAuthentication bool + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. + LoginHint string + + // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required + // only when setting ClientID, and must match a redirect URI in the application's registration. + // Applications which have registered "http://localhost" as a redirect URI need not set this option. + RedirectURL string + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + client *publicClient +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. 
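+//
+// A minimal sketch (the storage scope below is only an example):
+//
+//	cred, err := azidentity.NewInteractiveBrowserCredential(nil)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	// GetToken opens the default browser and waits for the user to sign in.
+//	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
+//		Scopes: []string{"https://storage.azure.com/.default"},
+//	})
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	_ = tk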
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + msalOpts := publicClientOptions{ + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + Record: cp.authenticationRecord, + RedirectURL: cp.RedirectURL, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) + if err != nil { + return nil, err + } + return &InteractiveBrowserCredential{client: c}, nil +} + +// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go new file mode 100644 index 000000000..b1b4d5c8b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// TokenCachePersistenceOptions contains options for persistent token caching +type TokenCachePersistenceOptions struct { + // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text + // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts + // encryption before falling back to plaintext storage. + AllowUnencryptedStorage bool + + // Name identifies the cache. Set this to isolate data from other applications. + Name string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go new file mode 100644 index 000000000..c1498b464 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package internal + +import ( + "errors" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") + +// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to +// use a persistent cache must first import the cache module, which will replace this function +// with a platform-specific implementation. +var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { + if o == nil { + return nil, nil + } + return nil, errMissingImport +} + +// CacheFilePath returns the path to the cache file for the given name. +// Defining it in this package makes it available to azidentity tests. +var CacheFilePath = func(name string) (string, error) { + return "", errMissingImport +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 000000000..1aa1e0fc7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json new file mode 100644 index 000000000..1c3791777 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -0,0 +1,17 @@ +{ + "include": [ + { + "Agent": { + "msi_image": { + "ArmTemplateParameters": "@{deployResources = $true}", + "OSVmImage": "env:LINUXNEXTVMIMAGE", + "Pool": "env:LINUXPOOL" + } + }, + "GoVersion": [ + "1.22.1" + ], + "IDENTITY_IMDS_AVAILABLE": "1" + } + ] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 000000000..6122cc700 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,501 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + miResID = "mi_res_id" + msiEndpoint = "MSI_ENDPOINT" + msiResID = "msi_res_id" + msiSecret = "MSI_SECRET" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + qpClientID = "client_id" + serviceFabricAPIVersion = "2019-07-01-preview" +) + +var imdsProbeTimeout = time.Second + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeAzureML + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +type managedIdentityClient struct { + azClient *azcore.Client + endpoint string + id ManagedIDKind + msiType msiType + probeIMDS bool +} + +// arcKeyDirectory returns the directory expected to contain Azure Arc keys +var arcKeyDirectory = func() (string, error) { + switch runtime.GOOS { + case "linux": + return "/var/opt/azcmagent/tokens", nil + case "windows": + pd := os.Getenv("ProgramData") + if pd == "" { + return "", errors.New("environment variable ProgramData has no value") + } + return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil + default: + return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) + } +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 410, 429 and 5xx + // https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusGone, // 410 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline. 
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + c.endpoint = endpoint + if _, ok := os.LookupEnv(msiSecret); ok { + env = "Azure ML" + c.msiType = msiTypeAzureML + } else { + env = "Cloud Shell" + c.msiType = msiTypeCloudShell + } + } else { + c.probeIMDS = options.dac + setIMDSRetryOptionDefaults(&cp.Retry) + } + + client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + Tracing: azruntime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &cp) + if err != nil { + return nil, err + } + c.azClient = client + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, + // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block + if c.probeIMDS { + cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) + defer cancel() + cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) + req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) + if err == nil { + _, err = c.azClient.Pipeline().Do(req) + } + if err != nil { + msg := err.Error() + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + msg = "managed identity timed out. 
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + } + // send normal token requests from now on because something responded + c.probeIMDS = false + } + + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.azClient.Pipeline().Do(msg) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) + } + + if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS { + switch resp.StatusCode { + case http.StatusBadRequest: + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + msg := "failed to authenticate a system assigned identity" + if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { + msg += fmt.Sprintf(". The endpoint responded with %s", body) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + case http.StatusForbidden: + // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response, + // we return credentialUnavailableError so credential chains continue to their next credential + body, err := azruntime.Payload(resp) + if err == nil && strings.Contains(string(body), "unreachable") { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) + } + } + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes 
[]string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service + key, err := c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeAzureML: + return c.createAzureMLAuthRequest(ctx, id, scopes) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(msiResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(miResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("secret", os.Getenv(msiSecret)) + q := request.Raw().URL.Query() + q.Add("api-version", "2017-09-01") + q.Add("resource", strings.Join(scopes, " ")) + q.Add("clientid", os.Getenv(defaultIdentityClientID)) + if id != nil { + if id.idKind() == miResourceID { + log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") + q.Set("clientid", "") + q.Set(miResID, id.String()) + } else { + q.Set("clientid", id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + 
q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(miResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retreive the secret key challenge provided by the HIMDS service + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.azClient.Pipeline().Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. + if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + _, p, found := strings.Cut(header, "=") + if !found { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + } + expected, err := arcKeyDirectory() + if err != nil { + return "", err + } + if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + } + f, err := os.Stat(p) + if err != nil { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + } + if s := f.Size(); s > 4096 { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) + } + key, err := os.ReadFile(p) + if err != nil { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(miResID, id.String()) + } else { + q.Add(qpClientID, 
id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(miResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 000000000..13c043d8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,128 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind + + // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have + // configuration for a specific managed identity API, the credential tries to determine whether IMDS is available before + // sending its first token request. It does this by sending a malformed request with a short timeout. 
Any response to that + // request is taken to mean IMDS is available, in which case the credential will send ordinary token requests thereafter + // with no special timeout. The purpose of this behavior is to prevent a very long timeout when IMDS isn't available. + dac bool +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: +// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client *confidentialClient + mic *managedIdentityClient +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. +func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ + ClientOptions: options.ClientOptions, + }) + if err != nil { + return nil, err + } + return &ManagedIdentityCredential{client: c, mic: mic}, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. +func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + + if len(opts.Scopes) != 1 { + err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) + return azcore.AccessToken{}, err + } + // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go new file mode 100644 index 000000000..9dcc82f01 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
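A minimal usage sketch of the ManagedIdentityCredential vendored above (illustrative only, not part of the vendored sources; the AZURE_CLIENT_ID fallback and the storage scope are assumptions):

```
// Illustrative sketch only; not part of the vendored module.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Use a user-assigned identity when a client ID is configured (assumed to
	// arrive via AZURE_CLIENT_ID here); otherwise fall back to the
	// system-assigned identity.
	opts := &azidentity.ManagedIdentityCredentialOptions{}
	if id := os.Getenv("AZURE_CLIENT_ID"); id != "" {
		opts.ID = azidentity.ClientID(id)
	}
	cred, err := azidentity.NewManagedIdentityCredential(opts)
	if err != nil {
		log.Fatal(err)
	}
	// GetToken accepts exactly one scope; managed identity endpoints want a
	// v1 resource audience plus "/.default". The storage scope is a placeholder.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}
```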
+ +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameOBO = "OnBehalfOfCredential" + +// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by +// middle-tier services that authorize requests to other services with a delegated user identity. Because this +// is not an interactive authentication flow, an application using it must have admin consent for any delegated +// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. +// +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/v2-oauth2-on-behalf-of-flow +type OnBehalfOfCredential struct { + client *confidentialClient +} + +// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential +type OnBehalfOfCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. + // This setting controls whether the credential sends the public certificate chain in the x5c header of each + // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. + SendCertificateChain bool +} + +// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate. +// See [ParseCertificates] for help loading a certificate. +func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromCert(certs, key) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithClientAssertions constructs an OnBehalfOfCredential that authenticates with client assertions. +// userAssertion is the user's access token for the application. The getAssertion function should return client assertions +// that authenticate the application to Microsoft Entra ID, such as federated credentials. +func NewOnBehalfOfCredentialWithClientAssertions(tenantID, clientID, userAssertion string, getAssertion func(context.Context) (string, error), options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion can't be nil. 
It must be a function that returns client assertions") + } + cred := confidential.NewCredFromAssertionCallback(func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }) + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. +func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + +func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if options == nil { + options = &OnBehalfOfCredentialOptions{} + } + opts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Assertion: userAssertion, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + } + c, err := newConfidentialClient(tenantID, clientID, credNameOBO, cred, opts) + if err != nil { + return nil, err + } + return &OnBehalfOfCredential{c}, nil +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := o.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go new file mode 100644 index 000000000..b3d22dbf3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -0,0 +1,273 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
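A minimal sketch of the on-behalf-of flow implemented by the OnBehalfOfCredential vendored above (illustrative only; the tenant ID, client ID, client secret, and Graph scope are placeholders):

```
// Illustrative sketch only; not part of the vendored module.
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// exchangeForGraphToken trades an incoming user access token for a downstream
// Microsoft Graph token via the on-behalf-of flow. The tenant ID, client ID,
// and client secret below are placeholders.
func exchangeForGraphToken(ctx context.Context, userAccessToken string) (string, error) {
	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(
		"tenant-id", "client-id", userAccessToken, "client-secret", nil)
	if err != nil {
		return "", err
	}
	tok, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"},
	})
	if err != nil {
		return "", err
	}
	return tok.Token, nil
}
```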
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + + // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate() + _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" +) + +type publicClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableAutomaticAuthentication bool + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Record authenticationRecord + TokenCachePersistenceOptions *tokenCachePersistenceOptions + Username, Password string +} + +// publicClient wraps the MSAL public client +type publicClient struct { + cae, noCAE msalPublicClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + defaultScope []string + host string + name string + opts publicClientOptions + record authenticationRecord + azClient *azcore.Client +} + +var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions") + +func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(o.Cloud) + if err != nil { + return nil, err + } + // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate() + audience := o.Cloud.Services[cloud.ResourceManager].Audience + if audience == "" { + // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash) + if !strings.HasSuffix(host, "/") { + host += "/" + } + switch host { + case cloud.AzureChina.ActiveDirectoryAuthorityHost: + audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience + case cloud.AzureGovernment.ActiveDirectoryAuthorityHost: + audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience + case cloud.AzurePublic.ActiveDirectoryAuthorityHost: + audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience + } + } + // if we didn't come up with an audience, the application will have to specify a scope for Authenticate() + var defaultScope []string + if audience != "" { + defaultScope = []string{audience + defaultSuffix} + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &o.ClientOptions) + if err != nil { + return nil, err + } + o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) + return &publicClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + defaultScope: defaultScope, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + record: o.Record, + tenantID: tenantID, + azClient: client, + }, nil +} + +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { + if tro == nil { + tro = &policy.TokenRequestOptions{} + } + if len(tro.Scopes) 
== 0 { + if p.defaultScope == nil { + return authenticationRecord{}, errScopeRequired + } + tro.Scopes = p.defaultScope + } + client, mu, err := p.client(*tro) + if err != nil { + return authenticationRecord{}, err + } + mu.Lock() + defer mu.Unlock() + _, err = p.reqToken(ctx, client, *tro) + if err == nil { + scope := strings.Join(tro.Scopes, ", ") + msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope) + log.Write(EventAuthentication, msg) + } + return p.record, err +} + +// GetToken requests an access token from MSAL, checking the cache first. +func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", p.name) + } + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + client, mu, err := p.client(tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if err == nil { + return p.token(ar, err) + } + if p.opts.DisableAutomaticAuthentication { + return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) + } + at, err := p.reqToken(ctx, client, tro) + if err == nil { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. +func (p *publicClient) reqToken(ctx context.Context, c msalPublicClient, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + var ar public.AuthResult + switch p.name { + case credNameBrowser: + ar, err = c.AcquireTokenInteractive(ctx, tro.Scopes, + public.WithClaims(tro.Claims), + public.WithLoginHint(p.opts.LoginHint), + public.WithRedirectURI(p.opts.RedirectURL), + public.WithTenantID(tenant), + ) + case credNameDeviceCode: + dc, e := c.AcquireTokenByDeviceCode(ctx, tro.Scopes, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if e != nil { + return azcore.AccessToken{}, e + } + err = p.opts.DeviceCodePrompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err == nil { + ar, err = dc.AuthenticationResult(ctx) + } + case credNameUserPassword: + ar, err = c.AcquireTokenByUsernamePassword(ctx, tro.Scopes, p.opts.Username, p.opts.Password, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + default: + return azcore.AccessToken{}, fmt.Errorf("unknown credential %q", p.name) + } + return p.token(ar, err) +} + +func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, *sync.Mutex, error) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + if tro.EnableCAE { + if p.cae == nil { + client, err := p.newMSALClient(true) + if err != nil { + return nil, nil, err + } + p.cae = client + } + return p.cae, p.caeMu, nil + } + if p.noCAE == nil { + client, err := p.newMSALClient(false) + if err != nil { + return nil, nil, err + } + p.noCAE = client + } + return p.noCAE, p.noCAEMu, nil +} + +func (p *publicClient) 
newMSALClient(enableCAE bool) (msalPublicClient, error) { + cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), + public.WithCache(cache), + public.WithHTTPClient(p), + } + if enableCAE { + o = append(o, public.WithClientCapabilities(cp1)) + } + if p.opts.DisableInstanceDiscovery || strings.ToLower(p.tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(p.clientID, o...) +} + +func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { + if err == nil { + p.record, err = newAuthenticationRecord(ar) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(p.name, err.Error(), res, err) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (p *publicClient) resolveTenant(specified string) (string, error) { + t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + if t == p.tenantID { + // callers pass this value to MSAL's WithTenantID(). There's no need to redundantly specify + // the client's default tenant and doing so is an error when that tenant is "organizations" + t = "" + } + return t, err +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (p *publicClient) CloseIdleConnections() { + // do nothing +} + +func (p *publicClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(p.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 new file mode 100644 index 000000000..a69bbce34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. + +param ( + [hashtable] $AdditionalParameters = @{}, + [hashtable] $DeploymentOutputs +) + +$ErrorActionPreference = 'Stop' +$PSNativeCommandUseErrorActionPreference = $true + +if ($CI) { + if (!$AdditionalParameters['deployResources']) { + Write-Host "Skipping post-provisioning script because resources weren't deployed" + return + } + az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] +} + +Write-Host "Building container" +$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" +Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +ENV GOARCH=amd64 GOWORK=off +COPY . /azidentity +WORKDIR /azidentity/testdata/managed-id-test +RUN go mod tidy +RUN go build -o /build/managed-id-test . +RUN GOOS=windows go build -o /build/managed-id-test.exe . + +FROM mcr.microsoft.com/mirror/docker/library/alpine:3.16 +RUN apk add gcompat +COPY --from=builder /build/* . 
+RUN chmod +x managed-id-test +CMD ["./managed-id-test"] +"@ +# build from sdk/azidentity because we need that dir in the context (because the test app uses local azidentity) +docker build -t $image "$PSScriptRoot" +az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME'] +docker push $image + +$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP'] + +# ACI is easier to provision here than in the bicep file because the image isn't available before now +Write-Host "Deploying Azure Container Instance" +$aciName = "azidentity-test" +az container create -g $rg -n $aciName --image $image ` + --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --role "Storage Blob Data Reader" ` + --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` + -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" + +# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip +Write-Host "Deploying to Azure Functions" +$container = docker create $image +docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/" +docker rm -v $container +Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force +az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip + +Write-Host "Creating federated identity" +$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME'] +$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME'] +$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv +$podName = "azidentity-test" +$serviceAccountName = "workload-identity-sa" +az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName +Write-Host "Deploying to AKS" +az aks get-credentials -g $rg -n $aksName +az aks update --attach-acr $DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName +Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @" +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + azure.workload.identity/client-id: $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) + name: $serviceAccountName + namespace: default +--- +apiVersion: v1 +kind: Pod +metadata: + name: $podName + namespace: default + labels: + app: $podName + azure.workload.identity/use: "true" +spec: + serviceAccountName: $serviceAccountName + containers: + - name: $podName + image: $image + env: + - name: AZIDENTITY_STORAGE_NAME + value: $($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) + - name: AZIDENTITY_USE_WORKLOAD_IDENTITY + value: "true" + - name: FUNCTIONS_CUSTOMHANDLER_PORT + value: "80" + nodeSelector: + kubernetes.io/os: linux +"@ +kubectl apply -f "$PSScriptRoot/k8s.yaml" +Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 new file mode 100644 index 000000000..58766d0a0 --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. + +[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + [hashtable] $AdditionalParameters = @{}, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +if (-not (Test-Path "$PSScriptRoot/sshkey.pub")) { + ssh-keygen -t rsa -b 4096 -f "$PSScriptRoot/sshkey" -N '' -C '' +} +$templateFileParameters['sshPubKey'] = Get-Content "$PSScriptRoot/sshkey.pub" + +if (!$CI) { + # TODO: Remove this once auto-cloud config downloads are supported locally + Write-Host "Skipping cert setup in local testing mode" + return +} + +if ($null -eq $EnvironmentVariables -or $EnvironmentVariables.Count -eq 0) { + throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" +} + +$tmp = $env:TEMP ? $env:TEMP : [System.IO.Path]::GetTempPath() +$pfxPath = Join-Path $tmp "test.pfx" +$pemPath = Join-Path $tmp "test.pem" + +Write-Host "Creating identity test files: $pfxPath $pemPath" + +[System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream +Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] + +# Set for pipeline +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" +# Set for local +$env:IDENTITY_SP_CERT_PFX = $pfxPath +$env:IDENTITY_SP_CERT_PEM = $pemPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep new file mode 100644 index 000000000..2a2165293 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -0,0 +1,219 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +@description('Kubernetes cluster admin user name.') +param adminUser string = 'azureuser' + +@minLength(6) +@maxLength(23) +@description('The base resource name.') +param baseName string = resourceGroup().name + +@description('Whether to deploy resources. When set to false, this file deploys nothing.') +param deployResources bool = false + +param sshPubKey string = '' + +@description('The location of the resource. 
By default, this is the same as the resource group.') +param location string = resourceGroup().location + +// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles +var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') +var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') + +resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource saUserAssigned 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa2${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource usermgdid 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = if (deployResources) { + location: location + name: baseName +} + +resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, acrPull, 'containerInstance') + properties: { + principalId: deployResources ? usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: acrPull + } + scope: containerRegistry +} + +resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + scope: saUserAssigned + name: guid(resourceGroup().id, blobReader, usermgdid.id) + properties: { + principalId: deployResources ? usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: blobReader + } +} + +resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, blobReader, 'azfunc') + properties: { + principalId: deployResources ? azfunc.identity.principalId : '' + roleDefinitionId: blobReader + principalType: 'ServicePrincipal' + } + scope: sa +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) { + location: location + name: uniqueString(resourceGroup().id) + properties: { + adminUserEnabled: true + } + sku: { + name: 'Basic' + } +} + +resource farm 'Microsoft.Web/serverfarms@2021-03-01' = if (deployResources) { + kind: 'app' + location: location + name: '${baseName}_asp' + properties: {} + sku: { + capacity: 1 + family: 'B' + name: 'B1' + size: 'B1' + tier: 'Basic' + } +} + +resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { + identity: { + type: 'SystemAssigned, UserAssigned' + userAssignedIdentities: { + '${deployResources ? usermgdid.id : ''}': {} + } + } + kind: 'functionapp' + location: location + name: '${baseName}func' + properties: { + enabled: true + httpsOnly: true + keyVaultReferenceIdentity: 'SystemAssigned' + serverFarmId: farm.id + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'AZIDENTITY_STORAGE_NAME' + value: deployResources ? sa.name : null + } + { + name: 'AZIDENTITY_STORAGE_NAME_USER_ASSIGNED' + value: deployResources ? saUserAssigned.name : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' + value: deployResources ? usermgdid.id : null + } + { + name: 'AzureWebJobsStorage' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? 
environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' + } + { + name: 'FUNCTIONS_EXTENSION_VERSION' + value: '~4' + } + { + name: 'FUNCTIONS_WORKER_RUNTIME' + value: 'custom' + } + { + name: 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' + } + { + name: 'WEBSITE_CONTENTSHARE' + value: toLower('${baseName}-func') + } + ] + http20Enabled: true + minTlsVersion: '1.2' + } + } +} + +resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deployResources) { + name: baseName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + agentPoolProfiles: [ + { + count: 1 + enableAutoScaling: false + kubeletDiskType: 'OS' + mode: 'System' + name: 'agentpool' + osDiskSizeGB: 128 + osDiskType: 'Managed' + osSKU: 'Ubuntu' + osType: 'Linux' + type: 'VirtualMachineScaleSets' + vmSize: 'Standard_D2s_v3' + } + ] + dnsPrefix: 'identitytest' + enableRBAC: true + linuxProfile: { + adminUsername: adminUser + ssh: { + publicKeys: [ + { + keyData: sshPubKey + } + ] + } + } + oidcIssuerProfile: { + enabled: true + } + securityProfile: { + workloadIdentity: { + enabled: true + } + } + } +} + +output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : '' +output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : '' +output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : '' +output AZIDENTITY_FUNCTION_NAME string = deployResources ? azfunc.name : '' +output AZIDENTITY_STORAGE_ID string = deployResources ? sa.id : '' +output AZIDENTITY_STORAGE_NAME string = deployResources ? sa.name : '' +output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAssigned.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 000000000..294ed81e9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,90 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. 
+ AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + client *publicClient +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + opts := publicClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.authenticationRecord, + TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + Username: username, + } + c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) + if err != nil { + return nil, err + } + return &UsernamePasswordCredential{client: c}, err +} + +// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
+func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 000000000..4305b5d3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // module is the fully qualified name of the module used in telemetry and distributed tracing. + module = "github.com/Azure/azure-sdk-for-go/sdk/" + component + + // Version is the semantic version (see http://semver.org) of this module. + version = "v1.7.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go new file mode 100644 index 000000000..3e43e788e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -0,0 +1,131 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const credNameWorkloadIdentity = "WorkloadIdentityCredential" + +// WorkloadIdentityCredential supports Azure workload identity on Kubernetes. +// See [Azure Kubernetes Service documentation] for more information. +// +// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview +type WorkloadIdentityCredential struct { + assertion, file string + cred *ClientAssertionCredential + expires time.Time + mtx *sync.RWMutex +} + +// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential. +type WorkloadIdentityCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. + ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. 
+ DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. + TenantID string + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the + // environment variable AZURE_FEDERATED_TOKEN_FILE. + TokenFilePath string +} + +// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read +// from environment variables as set by the Azure workload identity webhook. Set options to override those values. +func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) { + if options == nil { + options = &WorkloadIdentityCredentialOptions{} + } + ok := false + clientID := options.ClientID + if clientID == "" { + if clientID, ok = os.LookupEnv(azureClientID); !ok { + return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options") + } + } + file := options.TokenFilePath + if file == "" { + if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok { + return nil, errors.New("no token file specified. Check pod configuration or set TokenFilePath in the options") + } + } + tenantID := options.TenantID + if tenantID == "" { + if tenantID, ok = os.LookupEnv(azureTenantID); !ok { + return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options") + } + } + w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) + if err != nil { + return nil, err + } + // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" + cred.client.name = credNameWorkloadIdentity + w.cred = cred + return &w, nil +} + +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. +func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := w.cred.GetToken(ctx, opts) + return tk, err +} + +// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. +// Kubernetes is responsible for updating the file as service account tokens expire. +func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) { + w.mtx.RLock() + if w.expires.Before(time.Now()) { + // ensure only one goroutine at a time updates the assertion + w.mtx.RUnlock() + w.mtx.Lock() + defer w.mtx.Unlock() + // double check because another goroutine may have acquired the write lock first and done the update + if now := time.Now(); w.expires.Before(now) { + content, err := os.ReadFile(w.file) + if err != nil { + return "", err + } + w.assertion = string(content) + // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL + // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour), + // but we add some margin for safety. 
+ w.expires = now.Add(10 * time.Minute) + } + } else { + defer w.mtx.RUnlock() + } + return w.assertion, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 000000000..245af7d2b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. 
+func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 000000000..66bf13e5f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 000000000..8c6eacb61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 000000000..8ee66b526 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,46 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} + +// NonRetriableError marks the specified error as non-retriable. +// This function takes an error as input and returns a new error that is marked as non-retriable. +func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +// nonRetriableError is a struct that embeds the error interface. +// It is used to represent errors that should not be retried. +type nonRetriableError struct { + error +} + +// Error method for nonRetriableError struct. +// It returns the error message of the embedded error. +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +// NonRetriable is a marker method for nonRetriableError struct. +// Non-functional and indicates that the error is non-retriable. +func (*nonRetriableError) NonRetriable() { + // marker method +} + +// Unwrap method for nonRetriableError struct. +// It returns the original error that was marked as non-retriable. 
+func (p *nonRetriableError) Unwrap() error { + return p.error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 000000000..9948f604b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + if resp.Body == nil { + // this shouldn't happen in real-world scenarios as a + // response with no body should set it to http.NoBody + return nil, nil + } + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. 
+func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 000000000..d7876d297 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 000000000..4f1dcf1b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. 
+func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go new file mode 100644 index 000000000..db8269627 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go @@ -0,0 +1,155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package poller + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// these are non-conformant states that we've seen in the wild. +// we support them for back-compat. +const ( + StatusCancelled = "Cancelled" + StatusCompleted = "Completed" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return Failed(s) || Succeeded(s) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]any, error) { + body, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]any + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. 
+func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 000000000..238ef42ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
+type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. 
+ er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 000000000..a3824bee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 000000000..278ac9cd1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md new file mode 100644 index 000000000..b10ffcc6a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md @@ -0,0 +1,168 @@ +# Release History + +## 1.1.0 (2024-02-13) + +### Other Changes +* Upgraded to API service version `7.5` +* Upgraded dependencies + +## 1.1.0-beta.2 (2023-11-08) + +### Features Added +* Added the `HSMPlatform` field to the `KeyAttributes` struct + +### Other Changes +* Upgraded service version to `7.5-preview.1` +* Updated to latest version of `azcore`. +* Fixed value of `otel.library.name` in traces. + +## 1.1.0-beta.1 (2023-10-11) + +### Features Added + +* Enabled spans for distributed tracing. + +## 1.0.1 (2023-08-23) + +### Other Changes +* Upgraded dependencies + +## 1.0.0 (2023-07-17) + +### Features Added +* first stable release of `azkeys` module + +## 0.12.0 (2023-06-08) + +### Breaking Changes + +* Renamed `GetRandomBytesRequest` to `GetRandomBytesParameters` +* `ListDeletedKey` to `ListDeletedKeyProperties` +* `ListKeys` to `ListKeyProperties` +* `DeletedKeyBundle` to `DeletedKey` +* `KeyBundle` to `KeyVaultKey` +* `RestoreKeyParameters.KeyBundleBackup` to `RestoreKeyParameters.KeyBackup` +* `DeletedKeyItem` to `DeletedKeyProperties` +* `KeyItem` to `KeyProperties` +* `DeletedKeyListResult` to `DeletedKeyPropertiesListResult` +* `KeyListResult` `KeyPropertiesListResult` +* `KeyOperationsParameters` to `KeyOperationParameters` +* Changed `JSONWebKey.KeyOperations` from type []*string to []*KeyOperation +* `ReleaseParameters.Enc` to `ReleaseParameters.Algorithm` +* `KeyOperationParameters.AAD` to `KeyOperationParameters.AdditionalAuthenticatedData` +* `KeyOperationParameters.Tag` to `KeyOperationParameters.AuthenticationTag` +* `JSONWebKeyOperation` to `KeyOperation` +* `JSONWebKeyCurveName` to `KeyCurveName` +* `JSONWebKeyEncryptionAlgorithm` to `EncryptionAlgorithm` +* `JSONWebKeySignatureAlgorithm` to `SignatureAlgorithm` +* `JSONWebKeyType` to `KeyType` +* `LifetimeActions` to `LifetimeAction` +* Removed `DeletionRecoveryLevel` type +* Removed `SignatureAlgorithmRSNULL` constant +* Removed `KeyOperationExport` constant +* Removed `MaxResults` option + +### Other Changes +* Updated dependencies + +## 0.11.0 (2023-04-13) + +### Breaking Changes +* Moved from `sdk/keyvault/azkeys` to `sdk/security/keyvault/azkeys` + +## 0.10.0 (2023-04-13) + +### Features Added +* Upgraded to api version 7.4 + +### Breaking Changes +* Renamed `ActionType` to `KeyRotationPolicyAction` + +## 0.9.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.8.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. 
+ See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.8.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.7.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azkeys.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.6.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-keys/docs) + for updated code examples and more details. + +### Other Changes +* Upgrade to latest `azcore` + +## 0.5.1 (2022-05-12) + +### Other Changes +* Update to latest `azcore` and `internal` modules. + +## 0.5.0 (2022-04-06) + +### Features Added +* Added the Name property on `Key` + +### Breaking Changes +* Requires go 1.18 +* `ListPropertiesOfDeletedKeysPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfDeletedKeysPage, error)` for paging over deleted keys. +* `ListPropertiesOfKeyVersionsPager` has `More() bool` and `NextPage(context.Context) (ListPropertiesOfKeyVersionsPage, error)` for paging over deleted keys. +* Removing `RawResponse *http.Response` from `crypto` response types + +## 0.4.0 (2022-03-08) + +### Features Added +* Adds the `ReleasePolicy` parameter to the `UpdateKeyPropertiesOptions` struct. +* Adds the `Immutable` boolean to the `KeyReleasePolicy` model. +* Added a `ToPtr` method on `KeyType` constant + +### Breaking Changes +* Requires go 1.18 +* Changed the `Data` to `EncodedPolicy` on the `KeyReleasePolicy` struct. +* Changed the `Updated`, `Created`, and `Expires` properties to `UpdatedOn`, `CreatedOn`, and `ExpiresOn`. +* Renamed `JSONWebKeyOperation` to `Operation`. +* Renamed `JSONWebKeyCurveName` to `CurveName` +* Prefixed all KeyType constants with `KeyType` +* Changed `KeyBundle` to `KeyVaultKey` and `DeletedKeyBundle` to `DeletedKey` +* Renamed `KeyAttributes` to `KeyProperties` +* Renamed `ListKeyVersions` to `ListPropertiesOfKeyVersions` +* Removed `Attributes` struct +* Changed `CreateOCTKey`/`Response`/`Options` to `CreateOctKey`/`Response`/`Options` +* Removed all `RawResponse *http.Response` fields from response structs. + +## 0.3.0 (2022-02-08) + +### Breaking Changes +* Changed the `Tags` properties from `map[string]*string` to `map[string]string` + +### Bugs Fixed +* Fixed a bug in `UpdateKeyProperties` where the `KeyOps` would be deleted if the `UpdateKeyProperties.KeyOps` value was left empty. + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug in `crypto.NewClient` where the key version was required in the path, it is no longer required but is recommended. + +### Other Changes +* Updates `azcore` dependency from `v0.20.0` to `v0.21.0` + +## 0.1.0 (2021-11-09) +* This is the initial release of the `azkeys` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md new file mode 100644 index 000000000..709fe3005 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/README.md @@ -0,0 +1,147 @@ +# Azure Key Vault Keys client module for Go + +* Cryptographic key management (this module) - create, store, and control access to the keys used to encrypt your data +* Managed HSM administration ([azadmin](https://aka.ms/azsdk/go/keyvault-admin/docs)) - role-based access control (RBAC), settings, and vault-level backup and restore options +* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates +* Secrets management ([azsecrets](https://aka.ms/azsdk/go/keyvault-secrets/docs)) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets + +[Source code][key_client_src] | [Package (pkg.go.dev)][goget_azkeys] | [Product documentation][keyvault_docs] | [Samples][keys_samples] + +## Getting started + +### Install packages + +Install `azkeys` and `azidentity` with `go get`: +```Bash +go get github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` +[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below. + +### Prerequisites + +* An [Azure subscription][azure_sub] +* A supported Go version (the Azure SDK supports the two most recent Go releases) +* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli]. + +### Authentication + +This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production. + +[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types. + +#### Create a client + +Constructing the client requires your vault's URL, which you can get from the Azure CLI or the Azure Portal. 
+
+```go
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
+)
+
+func main() {
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		// TODO: handle error
+	}
+
+	client, err := azkeys.NewClient("https://<vault-name>.vault.azure.net", cred, nil)
+	if err != nil {
+		// TODO: handle error
+	}
+}
+```
+
+## Key concepts
+
+### Keys
+
+Azure Key Vault can create and store RSA and elliptic curve keys. Both can optionally be protected by hardware security modules (HSMs). Azure Key Vault can also perform cryptographic operations with them. For more information about keys and supported operations and algorithms, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/keys/about-keys).
+
+[Client][client_docs] can create keys in the vault, get existing keys from the vault, update key metadata, and delete keys, as shown in the examples below.
+
+## Examples
+
+Get started with our [examples][keys_samples].
+
+## Troubleshooting
+
+### Error Handling
+
+All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+
+resp, err := client.GetKey(context.Background(), "keyName", "", nil)
+if err != nil {
+    var httpErr *azcore.ResponseError
+    if errors.As(err, &httpErr) {
+        // TODO: investigate httpErr
+    } else {
+        // TODO: not an HTTP error
+    }
+}
+```
+
+### Logging
+
+This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout:
+
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// Print log events to stdout
+azlog.SetListener(func(cls azlog.Event, msg string) {
+    fmt.Println(msg)
+})
+
+// Includes only requests and responses in logs
+azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
+```
+
+### Accessing `http.Response`
+
+You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+var response *http.Response
+ctx := runtime.WithCaptureResponse(context.TODO(), &response)
+_, err = client.GetKey(ctx, "keyName", "", nil)
+if err != nil {
+    // TODO: handle error
+}
+// TODO: do something with response
+```
+
+### Additional Documentation
+
+For more extensive documentation on Azure Key Vault, see the [API reference documentation][reference_docs].
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct].
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. + + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[default_cred_ref]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#NewDefaultAzureCredential +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[goget_azkeys]: https://aka.ms/azsdk/go/keyvault-keys/docs +[reference_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-keys/docs#Client +[key_client_src]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/security/keyvault/azkeys/client.go +[keys_samples]: https://aka.ms/azsdk/go/keyvault-keys/docs#pkg-examples +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fsecurity%2Fkeyvault%2Fazkeys%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md new file mode 100644 index 000000000..857aa4c0b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Keys Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/security/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. 
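For reviewers unfamiliar with the new `azkeys` module path, here is a minimal sketch of how the vendored client is typically exercised. It is illustrative only: the vault URL and key name are placeholders, and the `Kty`/`KeySize` fields on `CreateKeyParameters` and the `to.Ptr` helper are assumptions about the generated `azkeys`/`azcore` surface, not something defined in this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder vault URL; substitute a real vault.
	client, err := azkeys.NewClient("https://<vault-name>.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Create an RSA key (Kty/KeySize are assumed field names on CreateKeyParameters).
	params := azkeys.CreateKeyParameters{
		Kty:     to.Ptr(azkeys.KeyTypeRSA),
		KeySize: to.Ptr(int32(2048)),
	}
	created, err := client.CreateKey(context.TODO(), "example-key", params, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", *created.Key.KID)

	// An empty version retrieves the latest version of the key.
	got, err := client.GetKey(context.TODO(), "example-key", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched:", *got.Key.KID)
}
```

Passing an empty `version` to `GetKey` returns the latest key version, which matches the generated client included in this patch.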
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json new file mode 100644 index 000000000..00d485deb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/security/keyvault/azkeys", + "Tag": "go/security/keyvault/azkeys_2d421aec6c" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md new file mode 100644 index 000000000..b067fd801 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/autorest.md @@ -0,0 +1,252 @@ +## Go + +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/7452e1cc7db72fbc6cd9539b390d8b8e5c2a1864/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.5/keys.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +openapi-type: "data-plane" +output-folder: ../azkeys +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.59" +inject-spans: true +version: "^3.0.0" + +directive: + # delete unused models + - remove-model: KeyExportParameters + - remove-model: KeyProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # rename parameter models to match their methods + - rename-model: + from: KeyCreateParameters + to: CreateKeyParameters + - rename-model: + from: KeyExportParameters + to: ExportKeyParameters + - rename-model: + from: KeyImportParameters + to: ImportKeyParameters + - rename-model: + from: KeyReleaseParameters + to: ReleaseParameters + - rename-model: + from: KeyRestoreParameters + to: RestoreKeyParameters + - rename-model: + from: KeySignParameters + to: SignParameters + - rename-model: + from: KeyUpdateParameters + to: UpdateKeyParameters + - rename-model: + from: KeyVerifyParameters + to: VerifyParameters + - rename-model: + from: GetRandomBytesRequest + to: GetRandomBytesParameters + + # rename paged operations from Get* to List* + - rename-operation: + from: GetDeletedKeys + to: ListDeletedKeyProperties + - rename-operation: + from: GetKeys + to: ListKeyProperties + - rename-operation: + from: GetKeyVersions + to: ListKeyPropertiesVersions + + # rename KeyItem and KeyBundle + - rename-model: + from: DeletedKeyBundle + to: DeletedKey + - rename-model: + from: KeyItem + to: KeyProperties + - rename-model: + from: DeletedKeyItem + to: DeletedKeyProperties + - rename-model: + from: DeletedKeyListResult + to: DeletedKeyPropertiesListResult + - rename-model: + from: KeyListResult + to: KeyPropertiesListResult + - from: swagger-document + where: $.definitions.RestoreKeyParameters.properties.value + transform: $["x-ms-client-name"] = "KeyBackup" + + # Change LifetimeActions to LifetimeAction + - rename-model: + from: LifetimeActions + to: LifetimeAction + - rename-model: + from: LifetimeActionsType + to: LifetimeActionType + - rename-model: + from: LifetimeActionsTrigger + to: LifetimeActionTrigger + + # Rename HsmPlatform to HSMPlatform for 
consistency + - where-model: KeyAttributes + rename-property: + from: hsmPlatform + to: HSMPlatform + + # Remove MaxResults parameter + - where: "$.paths..*" + remove-parameter: + in: query + name: maxresults + + # KeyOps updates + - rename-model: + from: KeyOperationsParameters + to: KeyOperationParameters + - from: models.go + where: $ + transform: return $.replace(/KeyOps \[\]\*string/, "KeyOps []*KeyOperation"); + + # fix capitalization + - from: swagger-document + where: $.definitions.ImportKeyParameters.properties.Hsm + transform: $["x-ms-client-name"] = "HSM" + - from: swagger-document + where: $.definitions..properties..iv + transform: $["x-ms-client-name"] = "IV" + - from: swagger-document + where: $.definitions..properties..kid + transform: $["x-ms-client-name"] = "KID" + + # keyName, keyVersion -> name, version + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-name')] + transform: $["x-ms-client-name"] = "name" + - from: swagger-document + where: $.paths..parameters..[?(@.name=='key-version')] + transform: $["x-ms-client-name"] = "version" + + # KeyEncryptionAlgorithm renames + - from: swagger-document + where: $.definitions.ReleaseParameters.properties.enc + transform: $["x-ms-client-name"] = "algorithm" + + # rename KeyOperationsParameters fields + - from: swagger-document + where: $.definitions.KeyOperationParameters.properties.aad + transform: $["x-ms-client-name"] = "AdditionalAuthenticatedData" + - from: swagger-document + where: $.definitions.KeyOperationParameters.properties.tag + transform: $["x-ms-client-name"] = "AuthenticationTag" + + # remove JSONWeb Prefix + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyOperation/g, "KeyOperation"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyCurveName/g, "CurveName"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyEncryptionAlgorithm/g, "EncryptionAlgorithm"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeySignatureAlgorithm/g, "SignatureAlgorithm"); + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/JSONWebKeyType/g, "KeyType"); + + # remove DeletionRecoveryLevel type + - from: models.go + where: $ + transform: return $.replace(/RecoveryLevel \*DeletionRecoveryLevel/g, "RecoveryLevel *string"); + - from: constants.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type DeletionRecoveryLevel string/, ""); + - from: constants.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func PossibleDeletionRecovery(?:.+\s)+\}/, ""); + - from: constants.go + where: $ + transform: return $.replace(/const \(\n\s\/\/ DeletionRecoveryLevel(?:.+\s)+\)/, ""); + + # delete SignatureAlgorithmRSNULL + - from: constants.go + where: $ + transform: return $.replace(/.*(\bSignatureAlgorithmRSNULL\b).*/g, ""); + + # delete KeyOperationExport + - from: constants.go + where: $ + transform: return $.replace(/.*(\bKeyOperationExport\b).*/g, ""); + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the Attributes model defined in common.json (it's used only with allOf) + - from: models.go + where: $ + transform: return 
$.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(a \*?Attributes\).*\{\s(?:.+\s)+\}\s/g, ""); + + # delete the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI) + - from: client.go + where: $ + transform: return $.replace(/\sif version == "" \{\s+.+version cannot be empty"\)\s+\}\s/g, ""); + + # delete client name prefix from method options and response types + - from: + - client.go + - models.go + - options.go + - response_types.go + - options.go + where: $ + transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); + + # insert a handwritten type for "KID" fields so we can add parsing methods + - from: models.go + where: $ + transform: return $.replace(/(KID \*)string(\s+.*)/g, "$1ID$2") + + # edit doc comments to not contain DeletedKeyBundle + - from: + - models.go + - response_types.go + where: $ + transform: return $.replace(/DeletedKeyBundle/g, "DeletedKey") + + # remove redundant section of doc comment + - from: + - models.go + - constants.go + where: $ + transform: return $.replace(/(For valid values, (.*)\.)|(For more information .*\.)|(For more information on possible algorithm\n\/\/ types, see JsonWebKeySignatureAlgorithm.)/g, ""); +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go new file mode 100644 index 000000000..da8103e45 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml new file mode 100644 index 000000000..86d11e976 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml @@ -0,0 +1,40 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azkeys + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azkeys + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + TimeoutInMinutes: 120 + ServiceDirectory: 'security/keyvault/azkeys' + RunLiveTests: true + UsePipelineProxy: false + AdditionalMatrixConfigs: + - Name: keyvault_test_matrix_addons + Path: sdk/security/keyvault/azkeys/platform-matrix.json + Selection: sparse + GenerateVMJobs: true + + # Due to the high cost of Managed HSMs, we only want to test using them weekly. 
+ ${{ if not(contains(variables['Build.DefinitionName'], 'tests-weekly')) }}: + MatrixFilters: + - ArmTemplateParameters=^(?!.*enableHsm.*true) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go new file mode 100644 index 000000000..bb4afa61a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/client.go @@ -0,0 +1,1418 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. +type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupKey - The Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation does +// NOT return key material in a form that can be used outside the Azure Key Vault system, +// the returned key material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this +// operation is to allow a client to GENERATE a key in one Azure Key Vault +// instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance. The BACKUP operation may be used to +// export, in protected form, any key type from Azure Key Vault. Individual +// versions of a key cannot be backed up. BACKUP / RESTORE can be performed within geographical boundaries only; meaning that +// a BACKUP from one geographical area cannot be restored to another +// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical area. This +// operation requires the key/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - options - BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +func (client *Client) BackupKey(ctx context.Context, name string, options *BackupKeyOptions) (BackupKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.BackupKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.backupKeyCreateRequest(ctx, name, options) + if err != nil { + return BackupKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BackupKeyResponse{}, err + } + resp, err := client.backupKeyHandleResponse(httpResp) + return resp, err +} + +// backupKeyCreateRequest creates the BackupKey request. 
+func (client *Client) backupKeyCreateRequest(ctx context.Context, name string, options *BackupKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupKeyHandleResponse handles the BackupKey response. +func (client *Client) backupKeyHandleResponse(resp *http.Response) (BackupKeyResponse, error) { + result := BackupKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupKeyResult); err != nil { + return BackupKeyResponse{}, err + } + return result, nil +} + +// CreateKey - The create key operation can be used to create any key type in Azure Key Vault. If the named key already exists, +// Azure Key Vault creates a new version of the key. It requires the keys/create +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name for the new key. The system will generate the version name for the new key. The value you provide may be +// copied globally for the purpose of running the service. The value provided should not +// include personally identifiable or sensitive information. +// - parameters - The parameters to create a key. +// - options - CreateKeyOptions contains the optional parameters for the Client.CreateKey method. +func (client *Client) CreateKey(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (CreateKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.CreateKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return CreateKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CreateKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return CreateKeyResponse{}, err + } + resp, err := client.createKeyHandleResponse(httpResp) + return resp, err +} + +// createKeyCreateRequest creates the CreateKey request. +func (client *Client) createKeyCreateRequest(ctx context.Context, name string, parameters CreateKeyParameters, options *CreateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/create" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createKeyHandleResponse handles the CreateKey response. 
+func (client *Client) createKeyHandleResponse(resp *http.Response) (CreateKeyResponse, error) { + result := CreateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return CreateKeyResponse{}, err + } + return result, nil +} + +// Decrypt - The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and specified +// algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of +// data may be decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT +// operation applies to asymmetric and symmetric keys stored in Azure Key Vault +// since it uses the private portion of the key. This operation requires the keys/decrypt permission. Microsoft recommends +// not to use CBC algorithms for decryption without first ensuring the integrity of +// the ciphertext using an HMAC, for example. See https://docs.microsoft.com/dotnet/standard/security/vulnerabilities-cbc-mode +// for more information. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the decryption operation. +// - options - DecryptOptions contains the optional parameters for the Client.Decrypt method. +func (client *Client) Decrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *DecryptOptions) (DecryptResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.Decrypt", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.decryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return DecryptResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DecryptResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DecryptResponse{}, err + } + resp, err := client.decryptHandleResponse(httpResp) + return resp, err +} + +// decryptCreateRequest creates the Decrypt request. +func (client *Client) decryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *DecryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/decrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// decryptHandleResponse handles the Decrypt response. +func (client *Client) decryptHandleResponse(resp *http.Response) (DecryptResponse, error) { + result := DecryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return DecryptResponse{}, err + } + return result, nil +} + +// DeleteKey - The delete key operation cannot be used to remove individual versions of a key. 
This operation removes the +// cryptographic material associated with the key, which means the key is not usable for +// Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the keys/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key to delete. +// - options - DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +func (client *Client) DeleteKey(ctx context.Context, name string, options *DeleteKeyOptions) (DeleteKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.DeleteKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteKeyCreateRequest(ctx, name, options) + if err != nil { + return DeleteKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DeleteKeyResponse{}, err + } + resp, err := client.deleteKeyHandleResponse(httpResp) + return resp, err +} + +// deleteKeyCreateRequest creates the DeleteKey request. +func (client *Client) deleteKeyCreateRequest(ctx context.Context, name string, options *DeleteKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteKeyHandleResponse handles the DeleteKey response. +func (client *Client) deleteKeyHandleResponse(resp *http.Response) (DeleteKeyResponse, error) { + result := DeleteKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKey); err != nil { + return DeleteKeyResponse{}, err + } + return result, nil +} + +// Encrypt - The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in Azure +// Key Vault. Note that the ENCRYPT operation only supports a single block of data, the size +// of which is dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly +// necessary for symmetric keys stored in Azure Key Vault since protection with an +// asymmetric key can be performed using public portion of the key. This operation is supported for asymmetric keys as a convenience +// for callers that have a key-reference but do not have access to the +// public key material. This operation requires the keys/encrypt permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the encryption operation. +// - options - EncryptOptions contains the optional parameters for the Client.Encrypt method. 
+func (client *Client) Encrypt(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *EncryptOptions) (EncryptResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.Encrypt", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.encryptCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return EncryptResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EncryptResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EncryptResponse{}, err + } + resp, err := client.encryptHandleResponse(httpResp) + return resp, err +} + +// encryptCreateRequest creates the Encrypt request. +func (client *Client) encryptCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *EncryptOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/encrypt" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// encryptHandleResponse handles the Encrypt response. +func (client *Client) encryptHandleResponse(resp *http.Response) (EncryptResponse, error) { + result := EncryptResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return EncryptResponse{}, err + } + return result, nil +} + +// GetDeletedKey - The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be +// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This +// operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - options - GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. +func (client *Client) GetDeletedKey(ctx context.Context, name string, options *GetDeletedKeyOptions) (GetDeletedKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetDeletedKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetDeletedKeyResponse{}, err + } + resp, err := client.getDeletedKeyHandleResponse(httpResp) + return resp, err +} + +// getDeletedKeyCreateRequest creates the GetDeletedKey request. 
+func (client *Client) getDeletedKeyCreateRequest(ctx context.Context, name string, options *GetDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedKeyHandleResponse handles the GetDeletedKey response. +func (client *Client) getDeletedKeyHandleResponse(resp *http.Response) (GetDeletedKeyResponse, error) { + result := GetDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKey); err != nil { + return GetDeletedKeyResponse{}, err + } + return result, nil +} + +// GetKey - The get key operation is applicable to all key types. If the requested key is symmetric, then no key material +// is released in the response. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. This URI fragment is optional. If not specified, +// the latest version of the key is returned. +// - options - GetKeyOptions contains the optional parameters for the Client.GetKey method. +func (client *Client) GetKey(ctx context.Context, name string, version string, options *GetKeyOptions) (GetKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getKeyCreateRequest(ctx, name, version, options) + if err != nil { + return GetKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetKeyResponse{}, err + } + resp, err := client.getKeyHandleResponse(httpResp) + return resp, err +} + +// getKeyCreateRequest creates the GetKey request. +func (client *Client) getKeyCreateRequest(ctx context.Context, name string, version string, options *GetKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyHandleResponse handles the GetKey response. 
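A brief, hedged sketch of calling the GetKey method above: passing an empty version retrieves the latest version, as its doc comment notes. The Key and KID fields come from the KeyBundle and JSONWebKey models that this patch vendors later in models.go.

```
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// printKeyID fetches the latest version of a key and prints its identifier.
func printKeyID(ctx context.Context, client *azkeys.Client, name string) error {
	resp, err := client.GetKey(ctx, name, "", nil) // "" selects the latest version
	if err != nil {
		return err
	}
	// resp embeds KeyBundle; Key.KID is assumed from the vendored models.
	fmt.Println("key ID:", *resp.Key.KID)
	return nil
}
```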
+func (client *Client) getKeyHandleResponse(resp *http.Response) (GetKeyResponse, error) { + result := GetKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return GetKeyResponse{}, err + } + return result, nil +} + +// GetKeyRotationPolicy - The GetKeyRotationPolicy operation returns the specified key policy resources in the specified key +// vault. This operation requires the keys/get permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key in a given key vault. +// - options - GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. +func (client *Client) GetKeyRotationPolicy(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (GetKeyRotationPolicyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetKeyRotationPolicy", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getKeyRotationPolicyCreateRequest(ctx, name, options) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetKeyRotationPolicyResponse{}, err + } + resp, err := client.getKeyRotationPolicyHandleResponse(httpResp) + return resp, err +} + +// getKeyRotationPolicyCreateRequest creates the GetKeyRotationPolicy request. +func (client *Client) getKeyRotationPolicyCreateRequest(ctx context.Context, name string, options *GetKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getKeyRotationPolicyHandleResponse handles the GetKeyRotationPolicy response. +func (client *Client) getKeyRotationPolicyHandleResponse(resp *http.Response) (GetKeyRotationPolicyResponse, error) { + result := GetKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return GetKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// GetRandomBytes - Get the requested number of bytes containing random values from a managed HSM. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - parameters - The request object to get random bytes. +// - options - GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. 
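A short sketch of the GetRandomBytes operation documented just above, which is only served by a Managed HSM endpoint. GetRandomBytesParameters.Count appears in the vendored models below; the Value field on the response is assumed.

```
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// randomBytes asks a Managed HSM for n random bytes.
func randomBytes(ctx context.Context, client *azkeys.Client, n int32) ([]byte, error) {
	resp, err := client.GetRandomBytes(ctx, azkeys.GetRandomBytesParameters{Count: to.Ptr(n)}, nil)
	if err != nil {
		return nil, err
	}
	return resp.Value, nil // Value assumed from the RandomBytes model
}
```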
+func (client *Client) GetRandomBytes(ctx context.Context, parameters GetRandomBytesParameters, options *GetRandomBytesOptions) (GetRandomBytesResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetRandomBytes", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getRandomBytesCreateRequest(ctx, parameters, options) + if err != nil { + return GetRandomBytesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetRandomBytesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetRandomBytesResponse{}, err + } + resp, err := client.getRandomBytesHandleResponse(httpResp) + return resp, err +} + +// getRandomBytesCreateRequest creates the GetRandomBytes request. +func (client *Client) getRandomBytesCreateRequest(ctx context.Context, parameters GetRandomBytesParameters, options *GetRandomBytesOptions) (*policy.Request, error) { + urlPath := "/rng" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// getRandomBytesHandleResponse handles the GetRandomBytes response. +func (client *Client) getRandomBytesHandleResponse(resp *http.Response) (GetRandomBytesResponse, error) { + result := GetRandomBytesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RandomBytes); err != nil { + return GetRandomBytesResponse{}, err + } + return result, nil +} + +// ImportKey - The import key operation may be used to import any key type into an Azure Key Vault. If the named key already +// exists, Azure Key Vault creates a new version of the key. This operation requires the +// keys/import permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - Name for the imported key. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters to import a key. +// - options - ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +func (client *Client) ImportKey(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (ImportKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.ImportKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.importKeyCreateRequest(ctx, name, parameters, options) + if err != nil { + return ImportKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ImportKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ImportKeyResponse{}, err + } + resp, err := client.importKeyHandleResponse(httpResp) + return resp, err +} + +// importKeyCreateRequest creates the ImportKey request. 
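To make the ImportKey flow above concrete, a hedged sketch that imports locally generated 256-bit symmetric (oct) key material. The ImportKeyParameters and JSONWebKey fields used here match the models this patch vendors below; whether the service accepts a given key type depends on the target (oct keys are generally a Managed HSM feature), and the key material is illustrative only.

```
package example

import (
	"context"
	"crypto/rand"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// importOctKey imports locally generated symmetric key material under the given name.
func importOctKey(ctx context.Context, client *azkeys.Client, name string) error {
	k := make([]byte, 32)
	if _, err := rand.Read(k); err != nil {
		return err
	}
	params := azkeys.ImportKeyParameters{
		HSM: to.Ptr(false),
		Key: &azkeys.JSONWebKey{
			Kty:    to.Ptr(azkeys.KeyTypeOct),
			K:      k,
			KeyOps: []*azkeys.KeyOperation{to.Ptr(azkeys.KeyOperationWrapKey), to.Ptr(azkeys.KeyOperationUnwrapKey)},
		},
	}
	_, err := client.ImportKey(ctx, name, params, nil)
	return err
}
```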
+func (client *Client) importKeyCreateRequest(ctx context.Context, name string, parameters ImportKeyParameters, options *ImportKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// importKeyHandleResponse handles the ImportKey response. +func (client *Client) importKeyHandleResponse(resp *http.Response) (ImportKeyResponse, error) { + result := ImportKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return ImportKeyResponse{}, err + } + return result, nil +} + +// NewListDeletedKeyPropertiesPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain +// the public part of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys +// operation is applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return +// an error if invoked on a non soft-delete enabled vault. This operation +// requires the keys/list permission. +// +// Generated from API version 7.5 +// - options - ListDeletedKeyPropertiesOptions contains the optional parameters for the Client.NewListDeletedKeyPropertiesPager +// method. +func (client *Client) NewListDeletedKeyPropertiesPager(options *ListDeletedKeyPropertiesOptions) *runtime.Pager[ListDeletedKeyPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedKeyPropertiesResponse]{ + More: func(page ListDeletedKeyPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedKeyPropertiesResponse) (ListDeletedKeyPropertiesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listDeletedKeyPropertiesCreateRequest(ctx, options) + }, nil) + if err != nil { + return ListDeletedKeyPropertiesResponse{}, err + } + return client.listDeletedKeyPropertiesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listDeletedKeyPropertiesCreateRequest creates the ListDeletedKeyProperties request. +func (client *Client) listDeletedKeyPropertiesCreateRequest(ctx context.Context, options *ListDeletedKeyPropertiesOptions) (*policy.Request, error) { + urlPath := "/deletedkeys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedKeyPropertiesHandleResponse handles the ListDeletedKeyProperties response. 
+func (client *Client) listDeletedKeyPropertiesHandleResponse(resp *http.Response) (ListDeletedKeyPropertiesResponse, error) { + result := ListDeletedKeyPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedKeyPropertiesListResult); err != nil { + return ListDeletedKeyPropertiesResponse{}, err + } + return result, nil +} + +// NewListKeyPropertiesPager - Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public +// part of a stored key. The LIST operation is applicable to all key types, however only the base key +// identifier, attributes, and tags are provided in the response. Individual versions of a key are not listed in the response. +// This operation requires the keys/list permission. +// +// Generated from API version 7.5 +// - options - ListKeyPropertiesOptions contains the optional parameters for the Client.NewListKeyPropertiesPager method. +func (client *Client) NewListKeyPropertiesPager(options *ListKeyPropertiesOptions) *runtime.Pager[ListKeyPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeyPropertiesResponse]{ + More: func(page ListKeyPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeyPropertiesResponse) (ListKeyPropertiesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listKeyPropertiesCreateRequest(ctx, options) + }, nil) + if err != nil { + return ListKeyPropertiesResponse{}, err + } + return client.listKeyPropertiesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listKeyPropertiesCreateRequest creates the ListKeyProperties request. +func (client *Client) listKeyPropertiesCreateRequest(ctx context.Context, options *ListKeyPropertiesOptions) (*policy.Request, error) { + urlPath := "/keys" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeyPropertiesHandleResponse handles the ListKeyProperties response. +func (client *Client) listKeyPropertiesHandleResponse(resp *http.Response) (ListKeyPropertiesResponse, error) { + result := ListKeyPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyPropertiesListResult); err != nil { + return ListKeyPropertiesResponse{}, err + } + return result, nil +} + +// NewListKeyPropertiesVersionsPager - The full key identifier, attributes, and tags are provided in the response. This operation +// requires the keys/list permission. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - options - ListKeyPropertiesVersionsOptions contains the optional parameters for the Client.NewListKeyPropertiesVersionsPager +// method. 
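The pagers in this file all follow the same runtime.Pager pattern; a sketch of draining NewListKeyPropertiesVersionsPager, documented just above, follows. The Value and KID fields are assumed from the KeyPropertiesListResult and KeyProperties models vendored elsewhere in this patch.

```
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// listVersions prints the identifier of every version of a key.
func listVersions(ctx context.Context, client *azkeys.Client, name string) error {
	pager := client.NewListKeyPropertiesVersionsPager(name, nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, props := range page.Value { // Value/KID assumed from the vendored models
			fmt.Println(*props.KID)
		}
	}
	return nil
}
```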
+func (client *Client) NewListKeyPropertiesVersionsPager(name string, options *ListKeyPropertiesVersionsOptions) *runtime.Pager[ListKeyPropertiesVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListKeyPropertiesVersionsResponse]{ + More: func(page ListKeyPropertiesVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListKeyPropertiesVersionsResponse) (ListKeyPropertiesVersionsResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listKeyPropertiesVersionsCreateRequest(ctx, name, options) + }, nil) + if err != nil { + return ListKeyPropertiesVersionsResponse{}, err + } + return client.listKeyPropertiesVersionsHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listKeyPropertiesVersionsCreateRequest creates the ListKeyPropertiesVersions request. +func (client *Client) listKeyPropertiesVersionsCreateRequest(ctx context.Context, name string, options *ListKeyPropertiesVersionsOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeyPropertiesVersionsHandleResponse handles the ListKeyPropertiesVersions response. +func (client *Client) listKeyPropertiesVersionsHandleResponse(resp *http.Response) (ListKeyPropertiesVersionsResponse, error) { + result := ListKeyPropertiesVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyPropertiesListResult); err != nil { + return ListKeyPropertiesVersionsResponse{}, err + } + return result, nil +} + +// PurgeDeletedKey - The Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can +// be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. +// This operation requires the keys/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key +// - options - PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +func (client *Client) PurgeDeletedKey(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (PurgeDeletedKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.PurgeDeletedKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.purgeDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return PurgeDeletedKeyResponse{}, err + } + return PurgeDeletedKeyResponse{}, nil +} + +// purgeDeletedKeyCreateRequest creates the PurgeDeletedKey request. 
+func (client *Client) purgeDeletedKeyCreateRequest(ctx context.Context, name string, options *PurgeDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedKey - The Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It +// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted +// key will return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. This operation +// requires the keys/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the deleted key. +// - options - RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +func (client *Client) RecoverDeletedKey(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (RecoverDeletedKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.RecoverDeletedKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.recoverDeletedKeyCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return RecoverDeletedKeyResponse{}, err + } + resp, err := client.recoverDeletedKeyHandleResponse(httpResp) + return resp, err +} + +// recoverDeletedKeyCreateRequest creates the RecoverDeletedKey request. +func (client *Client) recoverDeletedKeyCreateRequest(ctx context.Context, name string, options *RecoverDeletedKeyOptions) (*policy.Request, error) { + urlPath := "/deletedkeys/{key-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedKeyHandleResponse handles the RecoverDeletedKey response. +func (client *Client) recoverDeletedKeyHandleResponse(resp *http.Response) (RecoverDeletedKeyResponse, error) { + result := RecoverDeletedKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RecoverDeletedKeyResponse{}, err + } + return result, nil +} + +// Release - The release key operation is applicable to all key types. The target key must be marked exportable. This operation +// requires the keys/release permission. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 7.5 +// - name - The name of the key to get. +// - version - Adding the version parameter retrieves a specific version of a key. +// - parameters - The parameters for the key release operation. +// - options - ReleaseOptions contains the optional parameters for the Client.Release method. +func (client *Client) Release(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (ReleaseResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.Release", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.releaseCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return ReleaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ReleaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ReleaseResponse{}, err + } + resp, err := client.releaseHandleResponse(httpResp) + return resp, err +} + +// releaseCreateRequest creates the Release request. +func (client *Client) releaseCreateRequest(ctx context.Context, name string, version string, parameters ReleaseParameters, options *ReleaseOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/release" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// releaseHandleResponse handles the Release response. +func (client *Client) releaseHandleResponse(resp *http.Response) (ReleaseResponse, error) { + result := ReleaseResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyReleaseResult); err != nil { + return ReleaseResponse{}, err + } + return result, nil +} + +// RestoreKey - Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, attributes +// and access control policies. The RESTORE operation may be used to import a previously backed +// up key. Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as +// it had when it was backed up. If the key name is not available in the target Key +// Vault, the RESTORE operation will be rejected. While the key name is retained during restore, the final key identifier +// will change if the key is restored to a different vault. Restore will restore all +// versions and preserve version identifiers. The RESTORE operation is subject to security constraints: The target Key Vault +// must be owned by the same Microsoft Azure Subscription as the source Key Vault +// The user must have RESTORE permission in the target Key Vault. This operation requires the keys/restore permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - parameters - The parameters to restore the key. 
+// - options - RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. +func (client *Client) RestoreKey(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (RestoreKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.RestoreKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.restoreKeyCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return RestoreKeyResponse{}, err + } + resp, err := client.restoreKeyHandleResponse(httpResp) + return resp, err +} + +// restoreKeyCreateRequest creates the RestoreKey request. +func (client *Client) restoreKeyCreateRequest(ctx context.Context, parameters RestoreKeyParameters, options *RestoreKeyOptions) (*policy.Request, error) { + urlPath := "/keys/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// restoreKeyHandleResponse handles the RestoreKey response. +func (client *Client) restoreKeyHandleResponse(resp *http.Response) (RestoreKeyResponse, error) { + result := RestoreKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RestoreKeyResponse{}, err + } + return result, nil +} + +// RotateKey - The operation will rotate the key based on the key policy. It requires the keys/rotate permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of key to be rotated. The system will generate a new version in the specified key. +// - options - RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +func (client *Client) RotateKey(ctx context.Context, name string, options *RotateKeyOptions) (RotateKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.RotateKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.rotateKeyCreateRequest(ctx, name, options) + if err != nil { + return RotateKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RotateKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return RotateKeyResponse{}, err + } + resp, err := client.rotateKeyHandleResponse(httpResp) + return resp, err +} + +// rotateKeyCreateRequest creates the RotateKey request. 
+func (client *Client) rotateKeyCreateRequest(ctx context.Context, name string, options *RotateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotate" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// rotateKeyHandleResponse handles the RotateKey response. +func (client *Client) rotateKeyHandleResponse(resp *http.Response) (RotateKeyResponse, error) { + result := RotateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return RotateKeyResponse{}, err + } + return result, nil +} + +// Sign - The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this operation +// uses the private portion of the key. This operation requires the keys/sign permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the signing operation. +// - options - SignOptions contains the optional parameters for the Client.Sign method. +func (client *Client) Sign(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (SignResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.Sign", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.signCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return SignResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SignResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return SignResponse{}, err + } + resp, err := client.signHandleResponse(httpResp) + return resp, err +} + +// signCreateRequest creates the Sign request. +func (client *Client) signCreateRequest(ctx context.Context, name string, version string, parameters SignParameters, options *SignOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/sign" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// signHandleResponse handles the Sign response. 
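Since the Sign method above uses the key's private portion server-side, callers typically hash locally and send only the digest. A hedged sketch, with the SignParameters field names (Algorithm, Value) and the Result field on the response assumed from the vendored models:

```
package example

import (
	"context"
	"crypto/sha256"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// signDigest signs the SHA-256 digest of msg with the latest version of an EC P-256 key.
func signDigest(ctx context.Context, client *azkeys.Client, keyName string, msg []byte) ([]byte, error) {
	digest := sha256.Sum256(msg)
	params := azkeys.SignParameters{
		Algorithm: to.Ptr(azkeys.SignatureAlgorithmES256), // field names assumed from the vendored models
		Value:     digest[:],
	}
	resp, err := client.Sign(ctx, keyName, "", params, nil)
	if err != nil {
		return nil, err
	}
	return resp.Result, nil // signature bytes; Result assumed from KeyOperationResult
}
```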
+func (client *Client) signHandleResponse(resp *http.Response) (SignResponse, error) { + result := SignResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return SignResponse{}, err + } + return result, nil +} + +// UnwrapKey - The UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This operation +// is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and +// symmetric keys stored in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for the key operation. +// - options - UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. +func (client *Client) UnwrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *UnwrapKeyOptions) (UnwrapKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.UnwrapKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.unwrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UnwrapKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UnwrapKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return UnwrapKeyResponse{}, err + } + resp, err := client.unwrapKeyHandleResponse(httpResp) + return resp, err +} + +// unwrapKeyCreateRequest creates the UnwrapKey request. +func (client *Client) unwrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *UnwrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/unwrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// unwrapKeyHandleResponse handles the UnwrapKey response. +func (client *Client) unwrapKeyHandleResponse(resp *http.Response) (UnwrapKeyResponse, error) { + result := UnwrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return UnwrapKeyResponse{}, err + } + return result, nil +} + +// UpdateKey - In order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic material +// of a key itself cannot be changed. This operation requires the keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of key to update. +// - version - The version of the key to update. +// - parameters - The parameters of the key to update. 
+// - options - UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +func (client *Client) UpdateKey(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (UpdateKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.UpdateKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return UpdateKeyResponse{}, err + } + resp, err := client.updateKeyHandleResponse(httpResp) + return resp, err +} + +// updateKeyCreateRequest creates the UpdateKey request. +func (client *Client) updateKeyCreateRequest(ctx context.Context, name string, version string, parameters UpdateKeyParameters, options *UpdateKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// updateKeyHandleResponse handles the UpdateKey response. +func (client *Client) updateKeyHandleResponse(resp *http.Response) (UpdateKeyResponse, error) { + result := UpdateKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyBundle); err != nil { + return UpdateKeyResponse{}, err + } + return result, nil +} + +// UpdateKeyRotationPolicy - Set specified members in the key policy. Leave others as undefined. This operation requires the +// keys/update permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key in the given vault. +// - keyRotationPolicy - The policy for the key. +// - options - UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy +// method. 
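UpdateKeyRotationPolicy, documented just above, takes a full KeyRotationPolicy value, so the simplest safe pattern is a read-modify-write against GetKeyRotationPolicy. A minimal sketch follows; the embedded KeyRotationPolicy response field is visible in the handlers above, and any mutation of the policy's fields is left out since those models appear later in the patch.

```
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// roundTripRotationPolicy reads a key's rotation policy and writes it back unchanged.
// In practice you would adjust the policy's lifetime actions or expiry between the two calls.
func roundTripRotationPolicy(ctx context.Context, client *azkeys.Client, name string) error {
	getResp, err := client.GetKeyRotationPolicy(ctx, name, nil)
	if err != nil {
		return err
	}
	_, err = client.UpdateKeyRotationPolicy(ctx, name, getResp.KeyRotationPolicy, nil)
	return err
}
```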
+func (client *Client) UpdateKeyRotationPolicy(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (UpdateKeyRotationPolicyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.UpdateKeyRotationPolicy", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateKeyRotationPolicyCreateRequest(ctx, name, keyRotationPolicy, options) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return UpdateKeyRotationPolicyResponse{}, err + } + resp, err := client.updateKeyRotationPolicyHandleResponse(httpResp) + return resp, err +} + +// updateKeyRotationPolicyCreateRequest creates the UpdateKeyRotationPolicy request. +func (client *Client) updateKeyRotationPolicyCreateRequest(ctx context.Context, name string, keyRotationPolicy KeyRotationPolicy, options *UpdateKeyRotationPolicyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/rotationpolicy" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, keyRotationPolicy); err != nil { + return nil, err + } + return req, nil +} + +// updateKeyRotationPolicyHandleResponse handles the UpdateKeyRotationPolicy response. +func (client *Client) updateKeyRotationPolicyHandleResponse(resp *http.Response) (UpdateKeyRotationPolicyResponse, error) { + result := UpdateKeyRotationPolicyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyRotationPolicy); err != nil { + return UpdateKeyRotationPolicyResponse{}, err + } + return result, nil +} + +// Verify - The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly necessary +// for asymmetric keys stored in Azure Key Vault since signature verification can be +// performed using the public portion of the key but this operation is supported as a convenience for callers that only have +// a key-reference and not the public portion of the key. This operation requires +// the keys/verify permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for verify operations. +// - options - VerifyOptions contains the optional parameters for the Client.Verify method. 
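A companion sketch to the Sign example earlier: verifying a signature remotely via the Verify operation documented just above. The VerifyParameters field names (Algorithm, Digest, Signature) and the boolean Value on the result are assumed from the vendored models; as the doc comment notes, verifying locally with the public key is usually preferable when you have it.

```
package example

import (
	"context"
	"crypto/sha256"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// verifySignature checks a signature produced over the SHA-256 digest of msg.
func verifySignature(ctx context.Context, client *azkeys.Client, keyName string, msg, sig []byte) (bool, error) {
	digest := sha256.Sum256(msg)
	params := azkeys.VerifyParameters{
		Algorithm: to.Ptr(azkeys.SignatureAlgorithmES256), // field names assumed from the vendored models
		Digest:    digest[:],
		Signature: sig,
	}
	resp, err := client.Verify(ctx, keyName, "", params, nil)
	if err != nil {
		return false, err
	}
	return resp.Value != nil && *resp.Value, nil // Value assumed from KeyVerifyResult
}
```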
+func (client *Client) Verify(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (VerifyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.Verify", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.verifyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return VerifyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return VerifyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return VerifyResponse{}, err + } + resp, err := client.verifyHandleResponse(httpResp) + return resp, err +} + +// verifyCreateRequest creates the Verify request. +func (client *Client) verifyCreateRequest(ctx context.Context, name string, version string, parameters VerifyParameters, options *VerifyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/verify" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// verifyHandleResponse handles the Verify response. +func (client *Client) verifyHandleResponse(resp *http.Response) (VerifyResponse, error) { + result := VerifyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyVerifyResult); err != nil { + return VerifyResponse{}, err + } + return result, nil +} + +// WrapKey - The WRAP operation supports encryption of a symmetric key using a key encryption key that has previously been +// stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric +// keys stored in Azure Key Vault since protection with an asymmetric key can be performed using the public portion of the +// key. This operation is supported for asymmetric keys as a convenience for +// callers that have a key-reference but do not have access to the public key material. This operation requires the keys/wrapKey +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the key. +// - version - The version of the key. +// - parameters - The parameters for wrap operation. +// - options - WrapKeyOptions contains the optional parameters for the Client.WrapKey method. 
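WrapKey, documented just above, pairs with the earlier UnwrapKey for envelope encryption: a data-encryption key generated locally is wrapped by a key-encryption key that never leaves the vault. A hedged sketch, with the KeyOperationParameters field names and the Result field assumed from the vendored models:

```
package example

import (
	"context"
	"crypto/rand"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

// wrapAndUnwrap generates a local 256-bit data key, wraps it with an RSA key in
// the vault, then unwraps it again to show the round trip.
func wrapAndUnwrap(ctx context.Context, client *azkeys.Client, kekName string) error {
	dek := make([]byte, 32)
	if _, err := rand.Read(dek); err != nil {
		return err
	}
	wrapResp, err := client.WrapKey(ctx, kekName, "", azkeys.KeyOperationParameters{
		Algorithm: to.Ptr(azkeys.EncryptionAlgorithmRSAOAEP256), // field names assumed from the vendored models
		Value:     dek,
	}, nil)
	if err != nil {
		return err
	}
	_, err = client.UnwrapKey(ctx, kekName, "", azkeys.KeyOperationParameters{
		Algorithm: to.Ptr(azkeys.EncryptionAlgorithmRSAOAEP256),
		Value:     wrapResp.Result, // wrapped key bytes; Result assumed from KeyOperationResult
	}, nil)
	return err
}
```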
+func (client *Client) WrapKey(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *WrapKeyOptions) (WrapKeyResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.WrapKey", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.wrapKeyCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return WrapKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return WrapKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return WrapKeyResponse{}, err + } + resp, err := client.wrapKeyHandleResponse(httpResp) + return resp, err +} + +// wrapKeyCreateRequest creates the WrapKey request. +func (client *Client) wrapKeyCreateRequest(ctx context.Context, name string, version string, parameters KeyOperationParameters, options *WrapKeyOptions) (*policy.Request, error) { + urlPath := "/keys/{key-name}/{key-version}/wrapkey" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{key-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{key-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// wrapKeyHandleResponse handles the WrapKey response. +func (client *Client) wrapKeyHandleResponse(resp *http.Response) (WrapKeyResponse, error) { + result := WrapKeyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.KeyOperationResult); err != nil { + return WrapKeyResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go new file mode 100644 index 000000000..8f5c30c9f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/constants.go @@ -0,0 +1,211 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +// CurveName - Elliptic curve name. +type CurveName string + +const ( + // CurveNameP256 - The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. + CurveNameP256 CurveName = "P-256" + // CurveNameP256K - The SECG SECP256K1 elliptic curve. + CurveNameP256K CurveName = "P-256K" + // CurveNameP384 - The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. + CurveNameP384 CurveName = "P-384" + // CurveNameP521 - The NIST P-521 elliptic curve, AKA SECG curve SECP521R1. + CurveNameP521 CurveName = "P-521" +) + +// PossibleCurveNameValues returns the possible values for the CurveName const type. 
+func PossibleCurveNameValues() []CurveName { + return []CurveName{ + CurveNameP256, + CurveNameP256K, + CurveNameP384, + CurveNameP521, + } +} + +// EncryptionAlgorithm - algorithm identifier +type EncryptionAlgorithm string + +const ( + EncryptionAlgorithmA128CBC EncryptionAlgorithm = "A128CBC" + EncryptionAlgorithmA128CBCPAD EncryptionAlgorithm = "A128CBCPAD" + EncryptionAlgorithmA128GCM EncryptionAlgorithm = "A128GCM" + EncryptionAlgorithmA128KW EncryptionAlgorithm = "A128KW" + EncryptionAlgorithmA192CBC EncryptionAlgorithm = "A192CBC" + EncryptionAlgorithmA192CBCPAD EncryptionAlgorithm = "A192CBCPAD" + EncryptionAlgorithmA192GCM EncryptionAlgorithm = "A192GCM" + EncryptionAlgorithmA192KW EncryptionAlgorithm = "A192KW" + EncryptionAlgorithmA256CBC EncryptionAlgorithm = "A256CBC" + EncryptionAlgorithmA256CBCPAD EncryptionAlgorithm = "A256CBCPAD" + EncryptionAlgorithmA256GCM EncryptionAlgorithm = "A256GCM" + EncryptionAlgorithmA256KW EncryptionAlgorithm = "A256KW" + EncryptionAlgorithmRSA15 EncryptionAlgorithm = "RSA1_5" + EncryptionAlgorithmRSAOAEP EncryptionAlgorithm = "RSA-OAEP" + EncryptionAlgorithmRSAOAEP256 EncryptionAlgorithm = "RSA-OAEP-256" +) + +// PossibleEncryptionAlgorithmValues returns the possible values for the EncryptionAlgorithm const type. +func PossibleEncryptionAlgorithmValues() []EncryptionAlgorithm { + return []EncryptionAlgorithm{ + EncryptionAlgorithmA128CBC, + EncryptionAlgorithmA128CBCPAD, + EncryptionAlgorithmA128GCM, + EncryptionAlgorithmA128KW, + EncryptionAlgorithmA192CBC, + EncryptionAlgorithmA192CBCPAD, + EncryptionAlgorithmA192GCM, + EncryptionAlgorithmA192KW, + EncryptionAlgorithmA256CBC, + EncryptionAlgorithmA256CBCPAD, + EncryptionAlgorithmA256GCM, + EncryptionAlgorithmA256KW, + EncryptionAlgorithmRSA15, + EncryptionAlgorithmRSAOAEP, + EncryptionAlgorithmRSAOAEP256, + } +} + +// KeyOperation - JSON web key operations. For more information, see JsonWebKeyOperation. +type KeyOperation string + +const ( + KeyOperationDecrypt KeyOperation = "decrypt" + KeyOperationEncrypt KeyOperation = "encrypt" + + KeyOperationImport KeyOperation = "import" + KeyOperationSign KeyOperation = "sign" + KeyOperationUnwrapKey KeyOperation = "unwrapKey" + KeyOperationVerify KeyOperation = "verify" + KeyOperationWrapKey KeyOperation = "wrapKey" +) + +// PossibleKeyOperationValues returns the possible values for the KeyOperation const type. +func PossibleKeyOperationValues() []KeyOperation { + return []KeyOperation{ + KeyOperationDecrypt, + KeyOperationEncrypt, + + KeyOperationImport, + KeyOperationSign, + KeyOperationUnwrapKey, + KeyOperationVerify, + KeyOperationWrapKey, + } +} + +// SignatureAlgorithm - The signing/verification algorithm identifier. +type SignatureAlgorithm string + +const ( + // SignatureAlgorithmES256 - ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. 
+ SignatureAlgorithmES256 SignatureAlgorithm = "ES256" + // SignatureAlgorithmES256K - ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES256K SignatureAlgorithm = "ES256K" + // SignatureAlgorithmES384 - ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES384 SignatureAlgorithm = "ES384" + // SignatureAlgorithmES512 - ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmES512 SignatureAlgorithm = "ES512" + // SignatureAlgorithmPS256 - RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS256 SignatureAlgorithm = "PS256" + // SignatureAlgorithmPS384 - RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS384 SignatureAlgorithm = "PS384" + // SignatureAlgorithmPS512 - RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmPS512 SignatureAlgorithm = "PS512" + // SignatureAlgorithmRS256 - RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS256 SignatureAlgorithm = "RS256" + // SignatureAlgorithmRS384 - RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS384 SignatureAlgorithm = "RS384" + // SignatureAlgorithmRS512 - RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 + SignatureAlgorithmRS512 SignatureAlgorithm = "RS512" +) + +// PossibleSignatureAlgorithmValues returns the possible values for the SignatureAlgorithm const type. +func PossibleSignatureAlgorithmValues() []SignatureAlgorithm { + return []SignatureAlgorithm{ + SignatureAlgorithmES256, + SignatureAlgorithmES256K, + SignatureAlgorithmES384, + SignatureAlgorithmES512, + SignatureAlgorithmPS256, + SignatureAlgorithmPS384, + SignatureAlgorithmPS512, + SignatureAlgorithmRS256, + SignatureAlgorithmRS384, + SignatureAlgorithmRS512, + } +} + +// KeyType - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. +type KeyType string + +const ( + // KeyTypeEC - Elliptic Curve. + KeyTypeEC KeyType = "EC" + // KeyTypeECHSM - Elliptic Curve with a private key which is stored in the HSM. + KeyTypeECHSM KeyType = "EC-HSM" + // KeyTypeOct - Octet sequence (used to represent symmetric keys) + KeyTypeOct KeyType = "oct" + // KeyTypeOctHSM - Octet sequence (used to represent symmetric keys) which is stored the HSM. + KeyTypeOctHSM KeyType = "oct-HSM" + // KeyTypeRSA - RSA (https://tools.ietf.org/html/rfc3447) + KeyTypeRSA KeyType = "RSA" + // KeyTypeRSAHSM - RSA with a private key which is stored in the HSM. + KeyTypeRSAHSM KeyType = "RSA-HSM" +) + +// PossibleKeyTypeValues returns the possible values for the KeyType const type. 
+func PossibleKeyTypeValues() []KeyType { + return []KeyType{ + KeyTypeEC, + KeyTypeECHSM, + KeyTypeOct, + KeyTypeOctHSM, + KeyTypeRSA, + KeyTypeRSAHSM, + } +} + +// KeyEncryptionAlgorithm - The encryption algorithm to use to protected the exported key material +type KeyEncryptionAlgorithm string + +const ( + KeyEncryptionAlgorithmCKMRSAAESKEYWRAP KeyEncryptionAlgorithm = "CKM_RSA_AES_KEY_WRAP" + KeyEncryptionAlgorithmRSAAESKEYWRAP256 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_256" + KeyEncryptionAlgorithmRSAAESKEYWRAP384 KeyEncryptionAlgorithm = "RSA_AES_KEY_WRAP_384" +) + +// PossibleKeyEncryptionAlgorithmValues returns the possible values for the KeyEncryptionAlgorithm const type. +func PossibleKeyEncryptionAlgorithmValues() []KeyEncryptionAlgorithm { + return []KeyEncryptionAlgorithm{ + KeyEncryptionAlgorithmCKMRSAAESKEYWRAP, + KeyEncryptionAlgorithmRSAAESKEYWRAP256, + KeyEncryptionAlgorithmRSAAESKEYWRAP384, + } +} + +// KeyRotationPolicyAction - The type of the action. The value should be compared case-insensitively. +type KeyRotationPolicyAction string + +const ( + // KeyRotationPolicyActionNotify - Trigger Event Grid events. Defaults to 30 days before expiry. Key Vault only. + KeyRotationPolicyActionNotify KeyRotationPolicyAction = "Notify" + // KeyRotationPolicyActionRotate - Rotate the key based on the key policy. + KeyRotationPolicyActionRotate KeyRotationPolicyAction = "Rotate" +) + +// PossibleKeyRotationPolicyActionValues returns the possible values for the KeyRotationPolicyAction const type. +func PossibleKeyRotationPolicyActionValues() []KeyRotationPolicyAction { + return []KeyRotationPolicyAction{ + KeyRotationPolicyActionNotify, + KeyRotationPolicyActionRotate, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go new file mode 100644 index 000000000..3d67fa0e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go @@ -0,0 +1,68 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys + +// this file contains handwritten additions to the generated code + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal" +) + +// ClientOptions contains optional settings for Client. +type ClientOptions struct { + azcore.ClientOptions + + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain. + // See https://aka.ms/azsdk/blog/vault-uri for more information. + DisableChallengeResourceVerification bool +} + +// NewClient creates a client that accesses a Key Vault's keys. You should validate that vaultURL +// references a valid Key Vault or Managed HSM. See https://aka.ms/azsdk/blog/vault-uri for details. 
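For completeness, a sketch of constructing the client that the preceding examples assume, using the NewClient constructor documented just above. azidentity.NewDefaultAzureCredential comes from the sibling azidentity module rather than this patch, the vault URL and key name are placeholders, and the Key/KID fields are assumed from the vendored models.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// nil options accept the defaults, including challenge-resource verification.
	client, err := azkeys.NewClient("https://example-vault.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.GetKey(context.TODO(), "my-key", "", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched:", *resp.Key.KID)
}
```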
+func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient(moduleName, version, runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + Tracing: runtime.TracingOptions{ + Namespace: "Microsoft.KeyVault", + }, + }, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a key's unique ID, containing its version, if any, and name. +type ID string + +// Name of the key. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the key. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go new file mode 100644 index 000000000..6faae411e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models.go @@ -0,0 +1,428 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +import "time" + +// BackupKeyResult - The backup key result, containing the backup blob. +type BackupKeyResult struct { + // READ-ONLY; The backup blob containing the backed up key. + Value []byte +} + +// CreateKeyParameters - The key create parameters. +type CreateKeyParameters struct { + // REQUIRED; The type of key to create. + Kty *KeyType + + // Elliptic curve name. + Curve *CurveName + + // The attributes of a key managed by the key vault service. + KeyAttributes *KeyAttributes + KeyOps []*KeyOperation + + // The key size in bits. For example: 2048, 3072, or 4096 for RSA. + KeySize *int32 + + // The public exponent for a RSA key. + PublicExponent *int32 + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string +} + +// DeletedKey - A DeletedKey consisting of a WebKey plus its Attributes and deletion info +type DeletedKey struct { + // The key management attributes. + Attributes *KeyAttributes + + // The Json web key. + Key *JSONWebKey + + // The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *time.Time + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. 
+ Managed *bool + + // READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time +} + +// DeletedKeyProperties - The deleted key item containing the deleted key metadata and information about deletion. +type DeletedKeyProperties struct { + // The key management attributes. + Attributes *KeyAttributes + + // Key identifier. + KID *ID + + // The url of the recovery object, used to identify and recover the deleted key. + RecoveryID *string + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // READ-ONLY; The time when the key was deleted, in UTC + DeletedDate *time.Time + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool + + // READ-ONLY; The time when the key is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time +} + +// DeletedKeyPropertiesListResult - A list of keys that have been deleted in this vault. +type DeletedKeyPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of deleted keys. + NextLink *string + + // READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted + // keys + Value []*DeletedKeyProperties +} + +// GetRandomBytesParameters - The get random bytes request object. +type GetRandomBytesParameters struct { + // REQUIRED; The requested number of random bytes. + Count *int32 +} + +// ImportKeyParameters - The key import parameters. +type ImportKeyParameters struct { + // REQUIRED; The Json web key + Key *JSONWebKey + + // Whether to import as a hardware key (HSM) or software key. + HSM *bool + + // The key management attributes. + KeyAttributes *KeyAttributes + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string +} + +// JSONWebKey - As of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 +type JSONWebKey struct { + // Elliptic curve name. + Crv *CurveName + + // RSA private exponent, or the D component of an EC private key. + D []byte + + // RSA private key parameter. + DP []byte + + // RSA private key parameter. + DQ []byte + + // RSA public exponent. + E []byte + + // Symmetric key. + K []byte + + // Key identifier. + KID *ID + KeyOps []*KeyOperation + + // JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. + Kty *KeyType + + // RSA modulus. + N []byte + + // RSA secret prime. + P []byte + + // RSA secret prime, with p < q. + Q []byte + + // RSA private key parameter. + QI []byte + + // Protected Key, used with 'Bring Your Own Key'. + T []byte + + // X component of an EC public key. + X []byte + + // Y component of an EC public key. + Y []byte +} + +// KeyAttributes - The attributes of a key managed by the key vault service. +type KeyAttributes struct { + // Determines whether the object is enabled. + Enabled *bool + + // Expiry date in UTC. + Expires *time.Time + + // Indicates if the private key can be exported. Release policy must be provided when creating the first version of an exportable + // key. + Exportable *bool + + // Not before date in UTC. + NotBefore *time.Time + + // READ-ONLY; Creation time in UTC. + Created *time.Time + + // READ-ONLY; The underlying HSM Platform. + HSMPlatform *string + + // READ-ONLY; softDelete data retention days. 
Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 + + // READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' + // the key can be permanently deleted by a privileged user; otherwise, only the system + // can purge the key, at the end of the retention interval. + RecoveryLevel *string + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time +} + +// KeyBundle - A KeyBundle consisting of a WebKey plus its attributes. +type KeyBundle struct { + // The key management attributes. + Attributes *KeyAttributes + + // The Json web key. + Key *JSONWebKey + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool +} + +// KeyOperationParameters - The key operations parameters. +type KeyOperationParameters struct { + // REQUIRED; algorithm identifier + Algorithm *EncryptionAlgorithm + + // REQUIRED + Value []byte + + // Additional data to authenticate but not encrypt/decrypt when using authenticated crypto algorithms. + AdditionalAuthenticatedData []byte + + // The tag to authenticate when performing decryption with an authenticated algorithm. + AuthenticationTag []byte + + // Cryptographically random, non-repeating initialization vector for symmetric algorithms. + IV []byte +} + +// KeyOperationResult - The key operation result. +type KeyOperationResult struct { + // READ-ONLY + AdditionalAuthenticatedData []byte + + // READ-ONLY + AuthenticationTag []byte + + // READ-ONLY + IV []byte + + // READ-ONLY; Key identifier + KID *ID + + // READ-ONLY + Result []byte +} + +// KeyProperties - The key item containing key metadata. +type KeyProperties struct { + // The key management attributes. + Attributes *KeyAttributes + + // Key identifier. + KID *ID + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will + // be true. + Managed *bool +} + +// KeyPropertiesListResult - The key list result. +type KeyPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of keys. + NextLink *string + + // READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. + Value []*KeyProperties +} + +// KeyReleasePolicy - The policy rules under which the key can be exported. +type KeyReleasePolicy struct { + // Content type and version of key release policy + ContentType *string + + // Blob encoding the policy rules under which the key can be released. Blob must be base64 URL encoded. + EncodedPolicy []byte + + // Defines the mutability state of the policy. Once marked immutable, this flag cannot be reset and the policy cannot be changed + // under any circumstances. + Immutable *bool +} + +// KeyReleaseResult - The release result, containing the released key. +type KeyReleaseResult struct { + // READ-ONLY; A signed object containing the released key. + Value *string +} + +// KeyRotationPolicy - Management policy for a key. +type KeyRotationPolicy struct { + // The key rotation policy attributes. 
+ Attributes *KeyRotationPolicyAttributes + + // Actions that will be performed by Key Vault over the lifetime of a key. For preview, lifetimeActions can only have two + // items at maximum: one for rotate, one for notify. Notification time would be + // default to 30 days before expiry and it is not configurable. + LifetimeActions []*LifetimeAction + + // READ-ONLY; The key policy id. + ID *string +} + +// KeyRotationPolicyAttributes - The key rotation policy attributes. +type KeyRotationPolicyAttributes struct { + // The expiryTime will be applied on the new key version. It should be at least 28 days. It will be in ISO 8601 Format. Examples: + // 90 days: P90D, 3 months: P3M, 48 hours: PT48H, 1 year and 10 days: P1Y10D + ExpiryTime *string + + // READ-ONLY; The key rotation policy created time in UTC. + Created *time.Time + + // READ-ONLY; The key rotation policy's last updated time in UTC. + Updated *time.Time +} + +// KeyVerifyResult - The key verify result. +type KeyVerifyResult struct { + // READ-ONLY; True if the signature is verified, otherwise false. + Value *bool +} + +// LifetimeAction - Action and its trigger that will be performed by Key Vault over the lifetime of a key. +type LifetimeAction struct { + // The action that will be executed. + Action *LifetimeActionType + + // The condition that will execute the action. + Trigger *LifetimeActionTrigger +} + +// LifetimeActionTrigger - A condition to be satisfied for an action to be executed. +type LifetimeActionTrigger struct { + // Time after creation to attempt to rotate. It only applies to rotate. It will be in ISO 8601 duration format. Example: 90 + // days : "P90D" + TimeAfterCreate *string + + // Time before expiry to attempt to rotate or notify. It will be in ISO 8601 duration format. Example: 90 days : "P90D" + TimeBeforeExpiry *string +} + +// LifetimeActionType - The action that will be executed. +type LifetimeActionType struct { + // The type of the action. The value should be compared case-insensitively. + Type *KeyRotationPolicyAction +} + +// RandomBytes - The get random bytes response object containing the bytes. +type RandomBytes struct { + // REQUIRED; The bytes encoded as a base64url string. + Value []byte +} + +// ReleaseParameters - The release key parameters. +type ReleaseParameters struct { + // REQUIRED; The attestation assertion for the target of the key release. + TargetAttestationToken *string + + // The encryption algorithm to use to protected the exported key material + Algorithm *KeyEncryptionAlgorithm + + // A client provided nonce for freshness. + Nonce *string +} + +// RestoreKeyParameters - The key restore parameters. +type RestoreKeyParameters struct { + // REQUIRED; The backup blob associated with a key bundle. + KeyBackup []byte +} + +// SignParameters - The key operations parameters. +type SignParameters struct { + // REQUIRED; The signing/verification algorithm identifier. + Algorithm *SignatureAlgorithm + + // REQUIRED + Value []byte +} + +// UpdateKeyParameters - The key update parameters. +type UpdateKeyParameters struct { + // The attributes of a key managed by the key vault service. + KeyAttributes *KeyAttributes + + // Json web key operations. + KeyOps []*KeyOperation + + // The policy rules under which the key can be exported. + ReleasePolicy *KeyReleasePolicy + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string +} + +// VerifyParameters - The key verify parameters. 
+type VerifyParameters struct { + // REQUIRED; The signing/verification algorithm. + Algorithm *SignatureAlgorithm + + // REQUIRED; The digest used for signing. + Digest []byte + + // REQUIRED; The signature to be verified. + Signature []byte +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go new file mode 100644 index 000000000..d33da63e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/models_serde.go @@ -0,0 +1,1123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupKeyResult. +func (b BackupKeyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupKeyResult. +func (b *BackupKeyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CreateKeyParameters. +func (c CreateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", c.Curve) + populate(objectMap, "attributes", c.KeyAttributes) + populate(objectMap, "key_ops", c.KeyOps) + populate(objectMap, "key_size", c.KeySize) + populate(objectMap, "kty", c.Kty) + populate(objectMap, "public_exponent", c.PublicExponent) + populate(objectMap, "release_policy", c.ReleasePolicy) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CreateKeyParameters. 
+func (c *CreateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Curve", &c.Curve) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &c.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &c.KeyOps) + delete(rawMsg, key) + case "key_size": + err = unpopulate(val, "KeySize", &c.KeySize) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &c.Kty) + delete(rawMsg, key) + case "public_exponent": + err = unpopulate(val, "PublicExponent", &c.PublicExponent) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &c.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &c.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKey. +func (d DeletedKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "key", d.Key) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populate(objectMap, "release_policy", d.ReleasePolicy) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKey. +func (d *DeletedKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &d.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &d.ReleasePolicy) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyProperties. 
+func (d DeletedKeyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "kid", d.KID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyProperties. +func (d *DeletedKeyProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &d.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedKeyPropertiesListResult. +func (d DeletedKeyPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedKeyPropertiesListResult. +func (d *DeletedKeyPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GetRandomBytesParameters. +func (g GetRandomBytesParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "count", g.Count) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GetRandomBytesParameters. 
+func (g *GetRandomBytesParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "count": + err = unpopulate(val, "Count", &g.Count) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImportKeyParameters. +func (i ImportKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Hsm", i.HSM) + populate(objectMap, "key", i.Key) + populate(objectMap, "attributes", i.KeyAttributes) + populate(objectMap, "release_policy", i.ReleasePolicy) + populate(objectMap, "tags", i.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImportKeyParameters. +func (i *ImportKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Hsm": + err = unpopulate(val, "HSM", &i.HSM) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &i.Key) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "KeyAttributes", &i.KeyAttributes) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &i.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &i.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type JSONWebKey. +func (j JSONWebKey) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "crv", j.Crv) + populateByteArray(objectMap, "d", j.D, runtime.Base64URLFormat) + populateByteArray(objectMap, "dp", j.DP, runtime.Base64URLFormat) + populateByteArray(objectMap, "dq", j.DQ, runtime.Base64URLFormat) + populateByteArray(objectMap, "e", j.E, runtime.Base64URLFormat) + populateByteArray(objectMap, "k", j.K, runtime.Base64URLFormat) + populate(objectMap, "kid", j.KID) + populate(objectMap, "key_ops", j.KeyOps) + populate(objectMap, "kty", j.Kty) + populateByteArray(objectMap, "n", j.N, runtime.Base64URLFormat) + populateByteArray(objectMap, "p", j.P, runtime.Base64URLFormat) + populateByteArray(objectMap, "q", j.Q, runtime.Base64URLFormat) + populateByteArray(objectMap, "qi", j.QI, runtime.Base64URLFormat) + populateByteArray(objectMap, "key_hsm", j.T, runtime.Base64URLFormat) + populateByteArray(objectMap, "x", j.X, runtime.Base64URLFormat) + populateByteArray(objectMap, "y", j.Y, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type JSONWebKey. 
+func (j *JSONWebKey) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "crv": + err = unpopulate(val, "Crv", &j.Crv) + delete(rawMsg, key) + case "d": + err = runtime.DecodeByteArray(string(val), &j.D, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dp": + err = runtime.DecodeByteArray(string(val), &j.DP, runtime.Base64URLFormat) + delete(rawMsg, key) + case "dq": + err = runtime.DecodeByteArray(string(val), &j.DQ, runtime.Base64URLFormat) + delete(rawMsg, key) + case "e": + err = runtime.DecodeByteArray(string(val), &j.E, runtime.Base64URLFormat) + delete(rawMsg, key) + case "k": + err = runtime.DecodeByteArray(string(val), &j.K, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &j.KID) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &j.KeyOps) + delete(rawMsg, key) + case "kty": + err = unpopulate(val, "Kty", &j.Kty) + delete(rawMsg, key) + case "n": + err = runtime.DecodeByteArray(string(val), &j.N, runtime.Base64URLFormat) + delete(rawMsg, key) + case "p": + err = runtime.DecodeByteArray(string(val), &j.P, runtime.Base64URLFormat) + delete(rawMsg, key) + case "q": + err = runtime.DecodeByteArray(string(val), &j.Q, runtime.Base64URLFormat) + delete(rawMsg, key) + case "qi": + err = runtime.DecodeByteArray(string(val), &j.QI, runtime.Base64URLFormat) + delete(rawMsg, key) + case "key_hsm": + err = runtime.DecodeByteArray(string(val), &j.T, runtime.Base64URLFormat) + delete(rawMsg, key) + case "x": + err = runtime.DecodeByteArray(string(val), &j.X, runtime.Base64URLFormat) + delete(rawMsg, key) + case "y": + err = runtime.DecodeByteArray(string(val), &j.Y, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", j, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyAttributes. +func (k KeyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "enabled", k.Enabled) + populateTimeUnix(objectMap, "exp", k.Expires) + populate(objectMap, "exportable", k.Exportable) + populate(objectMap, "HSMPlatform", k.HSMPlatform) + populateTimeUnix(objectMap, "nbf", k.NotBefore) + populate(objectMap, "recoverableDays", k.RecoverableDays) + populate(objectMap, "recoveryLevel", k.RecoveryLevel) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyAttributes. 
+func (k *KeyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &k.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &k.Expires) + delete(rawMsg, key) + case "exportable": + err = unpopulate(val, "Exportable", &k.Exportable) + delete(rawMsg, key) + case "HSMPlatform": + err = unpopulate(val, "HSMPlatform", &k.HSMPlatform) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &k.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &k.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &k.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyBundle. +func (k KeyBundle) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "key", k.Key) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "release_policy", k.ReleasePolicy) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyBundle. +func (k *KeyBundle) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &k.Key) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &k.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationParameters. +func (k KeyOperationParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + populate(objectMap, "alg", k.Algorithm) + populateByteArray(objectMap, "tag", k.AuthenticationTag, runtime.Base64URLFormat) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", k.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationParameters. 
+func (k *KeyOperationParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + delete(rawMsg, key) + case "alg": + err = unpopulate(val, "Algorithm", &k.Algorithm) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.AuthenticationTag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyOperationResult. +func (k KeyOperationResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "aad", k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + populateByteArray(objectMap, "tag", k.AuthenticationTag, runtime.Base64URLFormat) + populateByteArray(objectMap, "iv", k.IV, runtime.Base64URLFormat) + populate(objectMap, "kid", k.KID) + populateByteArray(objectMap, "value", k.Result, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyOperationResult. +func (k *KeyOperationResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aad": + err = runtime.DecodeByteArray(string(val), &k.AdditionalAuthenticatedData, runtime.Base64URLFormat) + delete(rawMsg, key) + case "tag": + err = runtime.DecodeByteArray(string(val), &k.AuthenticationTag, runtime.Base64URLFormat) + delete(rawMsg, key) + case "iv": + err = runtime.DecodeByteArray(string(val), &k.IV, runtime.Base64URLFormat) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &k.Result, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyProperties. +func (k KeyProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "kid", k.KID) + populate(objectMap, "managed", k.Managed) + populate(objectMap, "tags", k.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyProperties. 
+func (k *KeyProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &k.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &k.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &k.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyPropertiesListResult. +func (k KeyPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", k.NextLink) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyPropertiesListResult. +func (k *KeyPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &k.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleasePolicy. +func (k KeyReleasePolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", k.ContentType) + populateByteArray(objectMap, "data", k.EncodedPolicy, runtime.Base64URLFormat) + populate(objectMap, "immutable", k.Immutable) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleasePolicy. +func (k *KeyReleasePolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &k.ContentType) + delete(rawMsg, key) + case "data": + err = runtime.DecodeByteArray(string(val), &k.EncodedPolicy, runtime.Base64URLFormat) + delete(rawMsg, key) + case "immutable": + err = unpopulate(val, "Immutable", &k.Immutable) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyReleaseResult. +func (k KeyReleaseResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyReleaseResult. 
+func (k *KeyReleaseResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicy. +func (k KeyRotationPolicy) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", k.Attributes) + populate(objectMap, "id", k.ID) + populate(objectMap, "lifetimeActions", k.LifetimeActions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicy. +func (k *KeyRotationPolicy) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &k.Attributes) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &k.ID) + delete(rawMsg, key) + case "lifetimeActions": + err = unpopulate(val, "LifetimeActions", &k.LifetimeActions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyRotationPolicyAttributes. +func (k KeyRotationPolicyAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", k.Created) + populate(objectMap, "expiryTime", k.ExpiryTime) + populateTimeUnix(objectMap, "updated", k.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyRotationPolicyAttributes. +func (k *KeyRotationPolicyAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &k.Created) + delete(rawMsg, key) + case "expiryTime": + err = unpopulate(val, "ExpiryTime", &k.ExpiryTime) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &k.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyVerifyResult. +func (k KeyVerifyResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", k.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyVerifyResult. 
+func (k *KeyVerifyResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &k.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeAction. +func (l LifetimeAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "action", l.Action) + populate(objectMap, "trigger", l.Trigger) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeAction. +func (l *LifetimeAction) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "action": + err = unpopulate(val, "Action", &l.Action) + delete(rawMsg, key) + case "trigger": + err = unpopulate(val, "Trigger", &l.Trigger) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionTrigger. +func (l LifetimeActionTrigger) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "timeAfterCreate", l.TimeAfterCreate) + populate(objectMap, "timeBeforeExpiry", l.TimeBeforeExpiry) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionTrigger. +func (l *LifetimeActionTrigger) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "timeAfterCreate": + err = unpopulate(val, "TimeAfterCreate", &l.TimeAfterCreate) + delete(rawMsg, key) + case "timeBeforeExpiry": + err = unpopulate(val, "TimeBeforeExpiry", &l.TimeBeforeExpiry) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LifetimeActionType. +func (l LifetimeActionType) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LifetimeActionType. +func (l *LifetimeActionType) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RandomBytes. 
+func (r RandomBytes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RandomBytes. +func (r *RandomBytes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ReleaseParameters. +func (r ReleaseParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enc", r.Algorithm) + populate(objectMap, "nonce", r.Nonce) + populate(objectMap, "target", r.TargetAttestationToken) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ReleaseParameters. +func (r *ReleaseParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enc": + err = unpopulate(val, "Algorithm", &r.Algorithm) + delete(rawMsg, key) + case "nonce": + err = unpopulate(val, "Nonce", &r.Nonce) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "TargetAttestationToken", &r.TargetAttestationToken) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreKeyParameters. +func (r RestoreKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.KeyBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreKeyParameters. +func (r *RestoreKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.KeyBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SignParameters. +func (s SignParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", s.Algorithm) + populateByteArray(objectMap, "value", s.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SignParameters. 
+func (s *SignParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &s.Algorithm) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &s.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateKeyParameters. +func (u UpdateKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", u.KeyAttributes) + populate(objectMap, "key_ops", u.KeyOps) + populate(objectMap, "release_policy", u.ReleasePolicy) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateKeyParameters. +func (u *UpdateKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "KeyAttributes", &u.KeyAttributes) + delete(rawMsg, key) + case "key_ops": + err = unpopulate(val, "KeyOps", &u.KeyOps) + delete(rawMsg, key) + case "release_policy": + err = unpopulate(val, "ReleasePolicy", &u.ReleasePolicy) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VerifyParameters. +func (v VerifyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alg", v.Algorithm) + populateByteArray(objectMap, "digest", v.Digest, runtime.Base64URLFormat) + populateByteArray(objectMap, "value", v.Signature, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VerifyParameters. 
+func (v *VerifyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alg": + err = unpopulate(val, "Algorithm", &v.Algorithm) + delete(rawMsg, key) + case "digest": + err = runtime.DecodeByteArray(string(val), &v.Digest, runtime.Base64URLFormat) + delete(rawMsg, key) + case "value": + err = runtime.DecodeByteArray(string(val), &v.Signature, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/options.go new file mode 100644 index 000000000..e3043e7a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/options.go @@ -0,0 +1,131 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +// BackupKeyOptions contains the optional parameters for the Client.BackupKey method. +type BackupKeyOptions struct { + // placeholder for future optional parameters +} + +// CreateKeyOptions contains the optional parameters for the Client.CreateKey method. +type CreateKeyOptions struct { + // placeholder for future optional parameters +} + +// DecryptOptions contains the optional parameters for the Client.Decrypt method. +type DecryptOptions struct { + // placeholder for future optional parameters +} + +// DeleteKeyOptions contains the optional parameters for the Client.DeleteKey method. +type DeleteKeyOptions struct { + // placeholder for future optional parameters +} + +// EncryptOptions contains the optional parameters for the Client.Encrypt method. +type EncryptOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedKeyOptions contains the optional parameters for the Client.GetDeletedKey method. +type GetDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyOptions contains the optional parameters for the Client.GetKey method. +type GetKeyOptions struct { + // placeholder for future optional parameters +} + +// GetKeyRotationPolicyOptions contains the optional parameters for the Client.GetKeyRotationPolicy method. 
+type GetKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// GetRandomBytesOptions contains the optional parameters for the Client.GetRandomBytes method. +type GetRandomBytesOptions struct { + // placeholder for future optional parameters +} + +// ImportKeyOptions contains the optional parameters for the Client.ImportKey method. +type ImportKeyOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedKeyPropertiesOptions contains the optional parameters for the Client.NewListDeletedKeyPropertiesPager +// method. +type ListDeletedKeyPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListKeyPropertiesOptions contains the optional parameters for the Client.NewListKeyPropertiesPager method. +type ListKeyPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListKeyPropertiesVersionsOptions contains the optional parameters for the Client.NewListKeyPropertiesVersionsPager +// method. +type ListKeyPropertiesVersionsOptions struct { + // placeholder for future optional parameters +} + +// PurgeDeletedKeyOptions contains the optional parameters for the Client.PurgeDeletedKey method. +type PurgeDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedKeyOptions contains the optional parameters for the Client.RecoverDeletedKey method. +type RecoverDeletedKeyOptions struct { + // placeholder for future optional parameters +} + +// ReleaseOptions contains the optional parameters for the Client.Release method. +type ReleaseOptions struct { + // placeholder for future optional parameters +} + +// RestoreKeyOptions contains the optional parameters for the Client.RestoreKey method. +type RestoreKeyOptions struct { + // placeholder for future optional parameters +} + +// RotateKeyOptions contains the optional parameters for the Client.RotateKey method. +type RotateKeyOptions struct { + // placeholder for future optional parameters +} + +// SignOptions contains the optional parameters for the Client.Sign method. +type SignOptions struct { + // placeholder for future optional parameters +} + +// UnwrapKeyOptions contains the optional parameters for the Client.UnwrapKey method. +type UnwrapKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyOptions contains the optional parameters for the Client.UpdateKey method. +type UpdateKeyOptions struct { + // placeholder for future optional parameters +} + +// UpdateKeyRotationPolicyOptions contains the optional parameters for the Client.UpdateKeyRotationPolicy method. +type UpdateKeyRotationPolicyOptions struct { + // placeholder for future optional parameters +} + +// VerifyOptions contains the optional parameters for the Client.Verify method. +type VerifyOptions struct { + // placeholder for future optional parameters +} + +// WrapKeyOptions contains the optional parameters for the Client.WrapKey method. 
+type WrapKeyOptions struct { + // placeholder for future optional parameters +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json new file mode 100644 index 000000000..51367ad93 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json @@ -0,0 +1,17 @@ +{ + "displayNames": { + "@{ enableHsm = $true }": "HSM" + }, + "include": [ + { + "Agent": { + "ubuntu-20.04": { + "OSVmImage": "MMSUbuntu20.04", + "Pool": "azsdk-pool-mms-ubuntu-2004-general" + } + }, + "ArmTemplateParameters": "@{ enableHsm = $true }", + "GoVersion": ["1.18"] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go new file mode 100644 index 000000000..b1b4678ea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/response_types.go @@ -0,0 +1,152 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azkeys + +// BackupKeyResponse contains the response from method Client.BackupKey. +type BackupKeyResponse struct { + // The backup key result, containing the backup blob. + BackupKeyResult +} + +// CreateKeyResponse contains the response from method Client.CreateKey. +type CreateKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// DecryptResponse contains the response from method Client.Decrypt. +type DecryptResponse struct { + // The key operation result. + KeyOperationResult +} + +// DeleteKeyResponse contains the response from method Client.DeleteKey. +type DeleteKeyResponse struct { + // A DeletedKey consisting of a WebKey plus its Attributes and deletion info + DeletedKey +} + +// EncryptResponse contains the response from method Client.Encrypt. +type EncryptResponse struct { + // The key operation result. + KeyOperationResult +} + +// GetDeletedKeyResponse contains the response from method Client.GetDeletedKey. +type GetDeletedKeyResponse struct { + // A DeletedKey consisting of a WebKey plus its Attributes and deletion info + DeletedKey +} + +// GetKeyResponse contains the response from method Client.GetKey. +type GetKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// GetKeyRotationPolicyResponse contains the response from method Client.GetKeyRotationPolicy. +type GetKeyRotationPolicyResponse struct { + // Management policy for a key. + KeyRotationPolicy +} + +// GetRandomBytesResponse contains the response from method Client.GetRandomBytes. +type GetRandomBytesResponse struct { + // The get random bytes response object containing the bytes. + RandomBytes +} + +// ImportKeyResponse contains the response from method Client.ImportKey. +type ImportKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// ListDeletedKeyPropertiesResponse contains the response from method Client.NewListDeletedKeyPropertiesPager. 
+type ListDeletedKeyPropertiesResponse struct { + // A list of keys that have been deleted in this vault. + DeletedKeyPropertiesListResult +} + +// ListKeyPropertiesResponse contains the response from method Client.NewListKeyPropertiesPager. +type ListKeyPropertiesResponse struct { + // The key list result. + KeyPropertiesListResult +} + +// ListKeyPropertiesVersionsResponse contains the response from method Client.NewListKeyPropertiesVersionsPager. +type ListKeyPropertiesVersionsResponse struct { + // The key list result. + KeyPropertiesListResult +} + +// PurgeDeletedKeyResponse contains the response from method Client.PurgeDeletedKey. +type PurgeDeletedKeyResponse struct { + // placeholder for future response values +} + +// RecoverDeletedKeyResponse contains the response from method Client.RecoverDeletedKey. +type RecoverDeletedKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// ReleaseResponse contains the response from method Client.Release. +type ReleaseResponse struct { + // The release result, containing the released key. + KeyReleaseResult +} + +// RestoreKeyResponse contains the response from method Client.RestoreKey. +type RestoreKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// RotateKeyResponse contains the response from method Client.RotateKey. +type RotateKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// SignResponse contains the response from method Client.Sign. +type SignResponse struct { + // The key operation result. + KeyOperationResult +} + +// UnwrapKeyResponse contains the response from method Client.UnwrapKey. +type UnwrapKeyResponse struct { + // The key operation result. + KeyOperationResult +} + +// UpdateKeyResponse contains the response from method Client.UpdateKey. +type UpdateKeyResponse struct { + // A KeyBundle consisting of a WebKey plus its attributes. + KeyBundle +} + +// UpdateKeyRotationPolicyResponse contains the response from method Client.UpdateKeyRotationPolicy. +type UpdateKeyRotationPolicyResponse struct { + // Management policy for a key. + KeyRotationPolicy +} + +// VerifyResponse contains the response from method Client.Verify. +type VerifyResponse struct { + // The key verify result. + KeyVerifyResult +} + +// WrapKeyResponse contains the response from method Client.WrapKey. +type WrapKeyResponse struct { + // The key operation result. + KeyOperationResult +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 new file mode 100644 index 000000000..80f20c0cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources-post.ps1 @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/New-TestResources.ps1 from the repository root. + +#Requires -Version 6.0 +#Requires -PSEdition Core + +using namespace System.Security.Cryptography +using namespace System.Security.Cryptography.X509Certificates + +# Use same parameter names as declared in eng/New-TestResources.ps1 (assume validation therein). 
+[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + [Parameter()] + [hashtable] $DeploymentOutputs, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +# By default stop for any error. +if (!$PSBoundParameters.ContainsKey('ErrorAction')) { + $ErrorActionPreference = 'Stop' +} + +function Log($Message) { + Write-Host ('{0} - {1}' -f [DateTime]::Now.ToLongTimeString(), $Message) +} + +function New-X509Certificate2([string] $SubjectName) { + + $rsa = [RSA]::Create(2048) + try { + $req = [CertificateRequest]::new( + [string] $SubjectName, + $rsa, + [HashAlgorithmName]::SHA256, + [RSASignaturePadding]::Pkcs1 + ) + + # TODO: Add any KUs necessary to $req.CertificateExtensions + + $NotBefore = [DateTimeOffset]::Now.AddDays(-1) + $NotAfter = $NotBefore.AddDays(365) + + $req.CreateSelfSigned($NotBefore, $NotAfter) + } + finally { + $rsa.Dispose() + } +} + +function Export-X509Certificate2([string] $Path, [X509Certificate2] $Certificate) { + + $Certificate.Export([X509ContentType]::Pfx) | Set-Content $Path -AsByteStream +} + +function Export-X509Certificate2PEM([string] $Path, [X509Certificate2] $Certificate) { + +@" +-----BEGIN CERTIFICATE----- +$([Convert]::ToBase64String($Certificate.RawData, 'InsertLineBreaks')) +-----END CERTIFICATE----- +"@ > $Path + +} + +# Make sure we deployed a Managed HSM. +if (!$DeploymentOutputs['AZURE_MANAGEDHSM_URL']) { + Log "Managed HSM not deployed; skipping activation" + exit +} + +[Uri] $hsmUrl = $DeploymentOutputs['AZURE_MANAGEDHSM_URL'] +$hsmName = $hsmUrl.Host.Substring(0, $hsmUrl.Host.IndexOf('.')) + +Log 'Creating 3 X509 certificates to activate security domain' +$wrappingFiles = foreach ($i in 0..2) { + $certificate = New-X509Certificate2 "CN=$($hsmUrl.Host)" + + $baseName = "$PSScriptRoot\$hsmName-certificate$i" + Export-X509Certificate2 "$baseName.pfx" $certificate + Export-X509Certificate2PEM "$baseName.cer" $certificate + + Resolve-Path "$baseName.cer" +} + +Log "Downloading security domain from '$hsmUrl'" + +$sdPath = "$PSScriptRoot\$hsmName-security-domain.key" +if (Test-Path $sdpath) { + Log "Deleting old security domain: $sdPath" + Remove-Item $sdPath -Force +} + +Export-AzKeyVaultSecurityDomain -Name $hsmName -Quorum 2 -Certificates $wrappingFiles -OutputPath $sdPath -ErrorAction SilentlyContinue -Verbose +if ( !$? ) { + Write-Host $Error[0].Exception + Write-Error $Error[0] + + exit +} + +Log "Security domain downloaded to '$sdPath'; Managed HSM is now active at '$hsmUrl'" + +# Force a sleep to wait for Managed HSM activation to propagate through Cosmos replication. Issue tracked in Azure DevOps. +Log 'Sleeping for 30 seconds to allow activation to propagate...' 
+Start-Sleep -Seconds 30 + +$testApplicationOid = $DeploymentOutputs['CLIENT_OBJECTID'] + +Log "Creating additional required role assignments for '$testApplicationOid'" +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto Officer' -ObjectID $testApplicationOid +$null = New-AzKeyVaultRoleAssignment -HsmName $hsmName -RoleDefinitionName 'Managed HSM Crypto User' -ObjectID $testApplicationOid + +Log "Role assignments created for '$testApplicationOid'" \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json new file mode 100644 index 000000000..b0d7b1e4b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/test-resources.json @@ -0,0 +1,296 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." + } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "australiaeast", + "allowedValues": [ + "australiacentral", + "australiaeast", + "canadacentral", + "canadaeast", + "centralindia", + "centralus", + "eastasia", + "eastus", + "eastus2", + "francecentral", + "japaneast", + "koreacentral", + "northcentralus", + "northeurope", + "southafricanorth", + "southcentralus", + "switzerlandnorth", + "switzerlandwest", + "uaenorth", + "uksouth", + "westcentralus", + "westeurope", + "westus", + "westus2", + "westus3" + ], + "metadata": { + "description": "The location of the Managed HSM." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." 
+ } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "kvAdminDefinitionId": "00482a5a-887f-4fb3-b363-3b7fe8e74483", + "kvAdminAssignmentName": "[guid(resourceGroup().id, variables('kvAdminDefinitionId'), parameters('testApplicationOid'))]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "enableRbacAuthorization": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "2020-04-01-preview", + "name": "[variables('kvAdminAssignmentName')]", + "properties": { + "roleDefinitionId": "[resourceId('Microsoft.Authorization/roleDefinitions', variables('kvAdminDefinitionId'))]", + "principalId": "[parameters('testApplicationOid')]", + "scope": "[resourceGroup().id]" + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + "deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": 
"2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go new file mode 100644 index 000000000..63591b5df --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/time_unix.go @@ -0,0 +1,61 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package azkeys + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go new file mode 100644 index 000000000..53dc2ebd2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azkeys + +const ( + moduleName = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys" + version = "v1.1.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md new file mode 100644 index 000000000..fbc2626d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md @@ -0,0 +1,75 @@ +# Release History + +## 1.0.1 (2024-04-09) + +### Other Changes +* Upgraded dependencies + +## 1.0.0 (2023-08-15) + +### Features Added +* This is the first stable release of the `internal` library for KeyVault. + +### Other Changes +* Upgrade dependencies + +## 0.8.0 (2023-03-08) + +### Breaking Changes +* Moved to new location + +### Other Changes +* Upgrade dependencies + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. 
+ +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. + +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md new file mode 100644 index 000000000..8516337cf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md @@ -0,0 +1,21 @@ +# Key Vault Internal Module for Go + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go new file mode 100644 index 000000000..5b1477bea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type keyVaultAuthorizer struct { + // tro is the policy's authentication parameters. These are discovered from an authentication challenge + // elicited ahead of the first client request. + tro policy.TokenRequestOptions + // TODO: move into tro once it has a tenant field (https://github.com/Azure/azure-sdk-for-go/issues/19841) + tenantID string + verifyChallengeResource bool +} + +type reqBody struct { + body io.ReadSeekCloser + contentType string +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) policy.Policy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + kv := keyVaultAuthorizer{ + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } + return runtime.NewBearerTokenPolicy(cred, nil, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: kv.authorize, + OnChallenge: kv.authorizeOnChallenge, + }, + }) +} + +func (k *keyVaultAuthorizer) authorize(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + if len(k.tro.Scopes) == 0 || k.tenantID == "" { + if body := req.Body(); body != nil { + // We don't know the scope or tenant ID because we haven't seen a challenge yet. We elicit one now by sending + // the request without authorization, first removing its body, if any. authorizeOnChallenge will reattach the + // body, authorize the request, and send it again. + rb := reqBody{body, req.Raw().Header.Get("content-type")} + req.SetOperationValue(rb) + if err := req.SetBody(nil, ""); err != nil { + return err + } + } + // returning nil indicates the bearer token policy should send the request + return nil + } + // else we know the auth parameters and can authorize the request as normal + return authNZ(k.tro) +} + +func (k *keyVaultAuthorizer) authorizeOnChallenge(req *policy.Request, res *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + if err := k.updateTokenRequestOptions(res, req.Raw()); err != nil { + return err + } + // reattach the request's original body, if it was removed by authorize(). 
If a bug prevents recovering + // the body, this policy will send the request without it and get a 400 response from Key Vault. + var rb reqBody + if req.OperationValue(&rb) { + if err := req.SetBody(rb.body, rb.contentType); err != nil { + return err + } + } + // authenticate with the parameters supplied by Key Vault, authorize the request, send it again + return authNZ(k.tro) +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000 +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant +} + +// updateTokenRequestOptions parses authentication parameters from Key Vault's challenge +func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req *http.Request) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return errors.New("response has no WWW-Authenticate header for challenge authentication") + } + + // Strip down to auth and resource + // Format is "Bearer authorization=\"\" resource=\"\"" OR + // "Bearer authorization=\"\" scope=\"\" resource=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + k.tenantID = parseTenant(vals["authorization"]) + scope := "" + if v, ok := vals["scope"]; ok { + scope = v + } else if v, ok := vals["resource"]; ok { + scope = v + } + if scope == "" { + return errors.New("could not find a valid resource in the WWW-Authenticate header") + } + if k.verifyChallengeResource { + // the challenge resource's host must match the requested vault's host + parsed, err := url.Parse(scope) + if err != nil { + return fmt.Errorf("invalid challenge resource %q: %v", scope, err) + } + if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { + return fmt.Errorf(challengeMatchError, scope) + } + } + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + k.tro.Scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.keyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.keyvault.yml new file mode 100644 index 000000000..ba8690ac3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.keyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'security/keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go new file mode 100644 index 000000000..e42adf9ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go @@ -0,0 +1,11 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +const ( + version = "v1.0.1" //nolint +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go new file mode 100644 index 000000000..d8f93492f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go new file mode 100644 index 000000000..8511832d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +package internal + +import ( + "fmt" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60" +// into "https://myvaultname.managedhsm.azure.net/", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60" +func ParseID(id *string) (*string, *string, *string) { + if id == nil { + return nil, nil, nil + } + parsed, err := url.Parse(*id) + if err != nil { + return nil, nil, nil + } + + url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host) + split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/") + if len(split) < 3 { + if len(split) == 2 { + return &url, to.Ptr(split[1]), nil + } + return &url, nil, nil + } + + return &url, to.Ptr(split[1]), to.Ptr(split[2]) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go deleted file mode 100644 index 501c1f2a4..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/client.go +++ /dev/null @@ -1,6307 +0,0 @@ -// Package keyvault implements the Azure ARM Keyvault service API version 2016-10-01. -// -// The key vault client performs cryptographic key operations and vault operations against the Key Vault service. 
-package keyvault - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BaseClient is the base client for Keyvault. -type BaseClient struct { - autorest.Client -} - -// New creates an instance of the BaseClient client. -func New() BaseClient { - return NewWithoutDefaults() -} - -// NewWithoutDefaults creates an instance of the BaseClient client. -func NewWithoutDefaults() BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - } -} - -// BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation -// does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key -// material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this operation is -// to allow a client to GENERATE a key in one Azure Key Vault instance, BACKUP the key, and then RESTORE it into -// another Azure Key Vault instance. The BACKUP operation may be used to export, in protected form, any key type from -// Azure Key Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed within -// geographical boundaries only; meaning that a BACKUP from one geographical area cannot be restored to another -// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical -// area. This operation requires the key/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
-func (client BaseClient) BackupKey(ctx context.Context, vaultBaseURL string, keyName string) (result BackupKeyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", nil, "Failure preparing request") - return - } - - resp, err := client.BackupKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure sending request") - return - } - - result, err = client.BackupKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure responding to request") - } - - return -} - -// BackupKeyPreparer prepares the BackupKey request. -func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupKeySender sends the BackupKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// BackupKeyResponder handles the response to the BackupKey request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupKeyResponder(resp *http.Response) (result BackupKeyResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupSecret requests that a backup of the specified secret be downloaded to the client. All versions of the secret -// will be downloaded. This operation requires the secrets/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
-func (client BaseClient) BackupSecret(ctx context.Context, vaultBaseURL string, secretName string) (result BackupSecretResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", nil, "Failure preparing request") - return - } - - resp, err := client.BackupSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure sending request") - return - } - - result, err = client.BackupSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure responding to request") - } - - return -} - -// BackupSecretPreparer prepares the BackupSecret request. -func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupSecretSender sends the BackupSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// BackupSecretResponder handles the response to the BackupSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupSecretResponder(resp *http.Response) (result BackupSecretResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateCertificate if this is the first version, the certificate resource is created. This operation requires the -// certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to create a certificate. 
-func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) - } - - req, err := client.CreateCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure sending request") - return - } - - result, err = client.CreateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure responding to request") - } - - return -} - -// CreateCertificatePreparer prepares the CreateCertificate request. -func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateCertificateSender sends the CreateCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
-} - -// CreateCertificateResponder handles the response to the CreateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateCertificateResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateKey the create key operation can be used to create any key type in Azure Key Vault. If the named key already -// exists, Azure Key Vault creates a new version of the key. It requires the keys/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name for the new key. The system will generate the version name for the new key. -// parameters - the parameters to create a key. -func (client BaseClient) CreateKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateKey", err.Error()) - } - - req, err := client.CreateKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", nil, "Failure preparing request") - return - } - - resp, err := client.CreateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure sending request") - return - } - - result, err = client.CreateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure responding to request") - } - - return -} - -// CreateKeyPreparer prepares the CreateKey request. -func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateKeySender sends the CreateKey request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// CreateKeyResponder handles the response to the CreateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Decrypt the DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and -// specified algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of data may be -// decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT operation -// applies to asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of the key. -// This operation requires the keys/decrypt permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the decryption operation. -func (client BaseClient) Decrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Decrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Decrypt", err.Error()) - } - - req, err := client.DecryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", nil, "Failure preparing request") - return - } - - resp, err := client.DecryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure sending request") - return - } - - result, err = client.DecryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure responding to request") - } - - return -} - -// DecryptPreparer prepares the Decrypt request. 
-func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/decrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DecryptSender sends the Decrypt request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DecryptResponder handles the response to the Decrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) DecryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificate deletes all versions of a certificate object along with its associated policy. Delete certificate -// cannot be used to remove individual versions of a certificate object. This operation requires the -// certificates/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure responding to request") - } - - return -} - -// DeleteCertificatePreparer prepares the DeleteCertificate request. 
-func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateSender sends the DeleteCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateContacts deletes the certificate contacts for a specified key vault certificate. This operation -// requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) DeleteCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure responding to request") - } - - return -} - -// DeleteCertificateContactsPreparer prepares the DeleteCertificateContacts request. 
-func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateIssuer the DeleteCertificateIssuer operation permanently removes the specified certificate issuer -// from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) DeleteCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure responding to request") - } - - return -} - -// DeleteCertificateIssuerPreparer prepares the DeleteCertificateIssuer request. 
-func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateOperation deletes the creation operation for a specified certificate that is in the process of -// being created. The certificate is no longer created. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure responding to request") - } - - return -} - -// DeleteCertificateOperationPreparer prepares the DeleteCertificateOperation request. 
-func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteKey the delete key operation cannot be used to remove individual versions of a key. This operation removes the -// cryptographic material associated with the key, which means the key is not usable for Sign/Verify, Wrap/Unwrap or -// Encrypt/Decrypt operations. This operation requires the keys/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to delete. -func (client BaseClient) DeleteKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure sending request") - return - } - - result, err = client.DeleteKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure responding to request") - } - - return -} - -// DeleteKeyPreparer prepares the DeleteKey request. 
-func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteKeySender sends the DeleteKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteKeyResponder handles the response to the DeleteKey request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSasDefinition deletes a SAS definition from a specified storage account. This operation requires the -// storage/deletesas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteSasDefinition", err.Error()) - } - - req, err := client.DeleteSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.DeleteSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure responding to request") - } - - return -} - -// DeleteSasDefinitionPreparer prepares the DeleteSasDefinition request. -func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSecret the DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an -// individual version of a secret. This operation requires the secrets/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) DeleteSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure sending request") - return - } - - result, err = client.DeleteSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure responding to request") - } - - return -} - -// DeleteSecretPreparer prepares the DeleteSecret request. -func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSecretSender sends the DeleteSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteSecretResponder handles the response to the DeleteSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteStorageAccount deletes a storage account. 
This operation requires the storage/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteStorageAccount", err.Error()) - } - - req, err := client.DeleteStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.DeleteStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure responding to request") - } - - return -} - -// DeleteStorageAccountPreparer prepares the DeleteStorageAccount request. -func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always -// closes the http.Response Body. 
-func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Encrypt the ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in -// Azure Key Vault. Note that the ENCRYPT operation only supports a single block of data, the size of which is -// dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly -// necessary for symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be performed -// using public portion of the key. This operation is supported for asymmetric keys as a convenience for callers that -// have a key-reference but do not have access to the public key material. This operation requires the keys/encrypt -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the encryption operation. -func (client BaseClient) Encrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Encrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Encrypt", err.Error()) - } - - req, err := client.EncryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", nil, "Failure preparing request") - return - } - - resp, err := client.EncryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure sending request") - return - } - - result, err = client.EncryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure responding to request") - } - - return -} - -// EncryptPreparer prepares the Encrypt request. 
-func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/encrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EncryptSender sends the Encrypt request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// EncryptResponder handles the response to the Encrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) EncryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificate gets information about a specific certificate. This operation requires the certificates/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificateVersion - the version of the certificate. -func (client BaseClient) GetCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure responding to request") - } - - return -} - -// GetCertificatePreparer prepares the GetCertificate request. 
-func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateSender sends the GetCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateResponder handles the response to the GetCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateContacts the GetCertificateContacts operation returns the set of certificate contact resources in the -// specified key vault. This operation requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) GetCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure responding to request") - } - - return -} - -// GetCertificateContactsPreparer prepares the GetCertificateContacts request. 
-func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuer the GetCertificateIssuer operation returns the specified certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) GetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure responding to request") - } - - return -} - -// GetCertificateIssuerPreparer prepares the GetCertificateIssuer request. 
-func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuers the GetCertificateIssuers operation returns the set of certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.cilr.Response.Response != nil { - sc = result.cilr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) - } - - result.fn = client.getCertificateIssuersNextResults - req, err := client.GetCertificateIssuersPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.cilr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure sending request") - return - } - - result.cilr, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure responding to request") - } - - return -} - -// GetCertificateIssuersPreparer prepares the GetCertificateIssuers request. -func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/issuers"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuersSender sends the GetCertificateIssuers request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateIssuersResponder(resp *http.Response) (result CertificateIssuerListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateIssuersNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateIssuersNextResults(ctx context.Context, lastResults CertificateIssuerListResult) (result CertificateIssuerListResult, err error) { - req, err := lastResults.certificateIssuerListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateIssuersComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateIssuersComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateIssuers(ctx, vaultBaseURL, maxresults) - return -} - -// GetCertificateOperation gets the creation operation associated with a specified certificate. This operation requires -// the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
-func (client BaseClient) GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure responding to request") - } - - return -} - -// GetCertificateOperationPreparer prepares the GetCertificateOperation request. -func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificatePolicy the GetCertificatePolicy operation returns the specified certificate policy resources in the -// specified key vault. This operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in a given key vault. 
-func (client BaseClient) GetCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.GetCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure responding to request") - } - - return -} - -// GetCertificatePolicyPreparer prepares the GetCertificatePolicy request. -func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificates the GetCertificates operation returns the set of certificates resources in the specified key vault. -// This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) - } - - result.fn = client.getCertificatesNextResults - req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure responding to request") - } - - return -} - -// GetCertificatesPreparer prepares the GetCertificates request. -func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatesSender sends the GetCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificatesResponder handles the response to the GetCertificates request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificatesResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificatesNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults) - return -} - -// GetCertificateVersions the GetCertificateVersions operation returns the versions of a certificate in the specified -// key vault. This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) - } - - result.fn = client.getCertificateVersionsNextResults - req, err := client.GetCertificateVersionsPreparer(ctx, vaultBaseURL, certificateName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure responding to request") - } - - return -} - -// GetCertificateVersionsPreparer prepares the GetCertificateVersions request. -func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetCertificateVersionsResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateVersionsNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateVersionsComplete(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateVersions(ctx, vaultBaseURL, certificateName, maxresults) - return -} - -// GetDeletedCertificate the GetDeletedCertificate operation retrieves the deleted certificate information plus its -// attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This -// operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// certificateName - the name of the certificate -func (client BaseClient) GetDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure responding to request") - } - - return -} - -// GetDeletedCertificatePreparer prepares the GetDeletedCertificate request. -func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedCertificates the GetDeletedCertificates operation retrieves the certificates in the current vault which -// are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. -// This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete -// enabled vaults. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedCertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.dclr.Response.Response != nil { - sc = result.dclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) - } - - result.fn = client.getDeletedCertificatesNextResults - req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.dclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure sending request") - return - } - - result.dclr, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure responding to request") - } - - return -} - -// GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. -func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedcertificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetDeletedCertificatesResponder(resp *http.Response) (result DeletedCertificateListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, lastResults DeletedCertificateListResult) (result DeletedCertificateListResult, err error) { - req, err := lastResults.deletedCertificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedCertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedKey the Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be -// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation requires -// the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
-func (client BaseClient) GetDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure responding to request") - } - - return -} - -// GetDeletedKeyPreparer prepares the GetDeletedKey request. -func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeySender sends the GetDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part -// of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys operation is -// applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return an -// error if invoked on a non soft-delete enabled vault. This operation requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.dklr.Response.Response != nil { - sc = result.dklr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) - } - - result.fn = client.getDeletedKeysNextResults - req, err := client.GetDeletedKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.dklr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure sending request") - return - } - - result.dklr, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure responding to request") - } - - return -} - -// GetDeletedKeysPreparer prepares the GetDeletedKeys request. -func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedkeys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeysResponder(resp *http.Response) (result DeletedKeyListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getDeletedKeysNextResults(ctx context.Context, lastResults DeletedKeyListResult) (result DeletedKeyListResult, err error) { - req, err := lastResults.deletedKeyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes. -// This operation requires the secrets/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) GetDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure responding to request") - } - - return -} - -// GetDeletedSecretPreparer prepares the GetDeletedSecret request. 
-func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedSecrets the Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled -// for soft-delete. This operation requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.dslr.Response.Response != nil { - sc = result.dslr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error()) - } - - result.fn = client.getDeletedSecretsNextResults - req, err := client.GetDeletedSecretsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.dslr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure sending request") - return - } - - result.dslr, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure responding to request") - } - - return -} - -// GetDeletedSecretsPreparer prepares the GetDeletedSecrets request. -func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedsecrets"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedSecretsResponder(resp *http.Response) (result DeletedSecretListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedSecretsNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getDeletedSecretsNextResults(ctx context.Context, lastResults DeletedSecretListResult) (result DeletedSecretListResult, err error) { - req, err := lastResults.deletedSecretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material -// is released in the response. This operation requires the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to get. -// keyVersion - adding the version parameter retrieves a specific version of a key. -func (client BaseClient) GetKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure sending request") - return - } - - result, err = client.GetKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure responding to request") - } - - return -} - -// GetKeyPreparer prepares the GetKey request. 
-func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeySender sends the GetKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetKeyResponder handles the response to the GetKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part of a -// stored key. The LIST operation is applicable to all key types, however only the base key identifier, attributes, and -// tags are provided in the response. Individual versions of a key are not listed in the response. This operation -// requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error()) - } - - result.fn = client.getKeysNextResults - req, err := client.GetKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeysSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure responding to request") - } - - return -} - -// GetKeysPreparer prepares the GetKeys request. -func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeysSender sends the GetKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetKeysResponder handles the response to the GetKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetKeysResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeysNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getKeysNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeysComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeys(ctx, vaultBaseURL, maxresults) - return -} - -// GetKeyVersions the full key identifier, attributes, and tags are provided in the response. This operation requires -// the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.klr.Response.Response != nil { - sc = result.klr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error()) - } - - result.fn = client.getKeyVersionsNextResults - req, err := client.GetKeyVersionsPreparer(ctx, vaultBaseURL, keyName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.klr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure sending request") - return - } - - result.klr, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure responding to request") - } - - return -} - -// GetKeyVersionsPreparer prepares the GetKeyVersions request. -func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetKeyVersionsSender sends the GetKeyVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetKeyVersionsResponder(resp *http.Response) (result KeyListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getKeyVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getKeyVersionsNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) { - req, err := lastResults.keyListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetKeyVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetKeyVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetKeyVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetKeyVersionsComplete(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetKeyVersions(ctx, vaultBaseURL, keyName, maxresults) - return -} - -// GetSasDefinition gets information about a SAS definition for the specified storage account. This operation requires -// the storage/getsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) GetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinition", err.Error()) - } - - req, err := client.GetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.GetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure responding to request") - } - - return -} - -// GetSasDefinitionPreparer prepares the GetSasDefinition request. -func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSasDefinitionSender sends the GetSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetSasDefinitionResponder handles the response to the GetSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSasDefinitions list storage SAS definitions for the given storage account. This operation requires the -// storage/listsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") - defer func() { - sc := -1 - if result.sdlr.Response.Response != nil { - sc = result.sdlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error()) - } - - result.fn = client.getSasDefinitionsNextResults - req, err := client.GetSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSasDefinitionsSender(req) - if err != nil { - result.sdlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure sending request") - return - } - - result.sdlr, err = client.GetSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure responding to request") - } - - return -} - -// GetSasDefinitionsPreparer prepares the GetSasDefinitions request. 
-func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always -// closes the http.Response Body. -func (client BaseClient) GetSasDefinitionsResponder(resp *http.Response) (result SasDefinitionListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSasDefinitionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSasDefinitionsNextResults(ctx context.Context, lastResults SasDefinitionListResult) (result SasDefinitionListResult, err error) { - req, err := lastResults.sasDefinitionListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSasDefinitionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSasDefinitionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client BaseClient) GetSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults) - return -} - -// GetSecret the GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the -// secrets/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. -func (client BaseClient) GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure sending request") - return - } - - result, err = client.GetSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure responding to request") - } - - return -} - -// GetSecretPreparer prepares the GetSecret request. -func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretSender sends the GetSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetSecretResponder handles the response to the GetSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSecrets the Get Secrets operation is applicable to the entire vault. However, only the base secret identifier and -// its attributes are provided in the response. Individual secret versions are not listed in the response. This -// operation requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified, the service will return up to -// 25 results. -func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error()) - } - - result.fn = client.getSecretsNextResults - req, err := client.GetSecretsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure responding to request") - } - - return -} - -// GetSecretsPreparer prepares the GetSecrets request. -func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/secrets"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretsSender sends the GetSecrets request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetSecretsResponder handles the response to the GetSecrets request. The method always -// closes the http.Response Body. -func (client BaseClient) GetSecretsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSecretsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecrets(ctx, vaultBaseURL, maxresults) - return -} - -// GetSecretVersions the full secret identifier and attributes are provided in the response. No values are returned for -// the secrets. This operations requires the secrets/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// maxresults - maximum number of results to return in a page. If not specified, the service will return up to -// 25 results. 
-func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) - } - - result.fn = client.getSecretVersionsNextResults - req, err := client.GetSecretVersionsPreparer(ctx, vaultBaseURL, secretName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure responding to request") - } - - return -} - -// GetSecretVersionsPreparer prepares the GetSecretVersions request. -func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretVersionsSender sends the GetSecretVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSecretVersionsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSecretVersionsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretVersionsComplete(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecretVersions(ctx, vaultBaseURL, secretName, maxresults) - return -} - -// GetStorageAccount gets information about a specified storage account. This operation requires the storage/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) GetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccount", err.Error()) - } - - req, err := client.GetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure responding to request") - } - - return -} - -// GetStorageAccountPreparer prepares the GetStorageAccount request. -func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountSender sends the GetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStorageAccounts list storage accounts managed by the specified key vault. This operation requires the -// storage/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. 
If not specified the service will return up to -// 25 results. -func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) - } - - result.fn = client.getStorageAccountsNextResults - req, err := client.GetStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure sending request") - return - } - - result.slr, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure responding to request") - } - - return -} - -// GetStorageAccountsPreparer prepares the GetStorageAccounts request. -func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetStorageAccountsResponder(resp *http.Response) (result StorageListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getStorageAccountsNextResults retrieves the next set of results, if any. -func (client BaseClient) getStorageAccountsNextResults(ctx context.Context, lastResults StorageListResult) (result StorageListResult, err error) { - req, err := lastResults.storageListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetStorageAccounts(ctx, vaultBaseURL, maxresults) - return -} - -// ImportCertificate imports an existing valid certificate, containing a private key, into Azure Key Vault. The -// certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must -// contain the key as well as x509 certificates. This operation requires the certificates/import permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to import the certificate. 
-func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Base64EncodedCertificate", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) - } - - req, err := client.ImportCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.ImportCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure sending request") - return - } - - result, err = client.ImportCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure responding to request") - } - - return -} - -// ImportCertificatePreparer prepares the ImportCertificate request. -func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/import", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportCertificateSender sends the ImportCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// ImportCertificateResponder handles the response to the ImportCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ImportKey the import key operation may be used to import any key type into an Azure Key Vault. If the named key -// already exists, Azure Key Vault creates a new version of the key. This operation requires the keys/import -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - name for the imported key. -// parameters - the parameters to import a key. -func (client BaseClient) ImportKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Key", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportKey", err.Error()) - } - - req, err := client.ImportKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", nil, "Failure preparing request") - return - } - - resp, err := client.ImportKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure sending request") - return - } - - result, err = client.ImportKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure responding to request") - } - - return -} - -// ImportKeyPreparer prepares the ImportKey request. 
-func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportKeySender sends the ImportKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// ImportKeyResponder handles the response to the ImportKey request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// MergeCertificate the MergeCertificate operation performs the merging of a certificate or certificate chain with a -// key pair currently available in the service. This operation requires the certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to merge certificate. 
-func (client BaseClient) MergeCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.MergeCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.X509Certificates", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "MergeCertificate", err.Error()) - } - - req, err := client.MergeCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.MergeCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure sending request") - return - } - - result, err = client.MergeCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure responding to request") - } - - return -} - -// MergeCertificatePreparer prepares the MergeCertificate request. -func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending/merge", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// MergeCertificateSender sends the MergeCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// MergeCertificateResponder handles the response to the MergeCertificate request. The method always -// closes the http.Response Body. 
-func (client BaseClient) MergeCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// PurgeDeletedCertificate the PurgeDeletedCertificate operation performs an irreversible deletion of the specified -// certificate, without possibility for recovery. The operation is not available if the recovery level does not specify -// 'Purgeable'. This operation requires the certificate/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate -func (client BaseClient) PurgeDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedCertificate") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedCertificateSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure responding to request") - } - - return -} - -// PurgeDeletedCertificatePreparer prepares the PurgeDeletedCertificate request. -func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always -// closes the http.Response Body. 
-func (client BaseClient) PurgeDeletedCertificateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedKey the Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation -// can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation -// requires the keys/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key -func (client BaseClient) PurgeDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedKey") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedKeySender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure responding to request") - } - - return -} - -// PurgeDeletedKeyPreparer prepares the PurgeDeletedKey request. -func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) PurgeDeletedKeyResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedSecret the purge deleted secret operation removes the secret permanently, without the possibility of -// recovery. This operation can only be enabled on a soft-delete enabled vault. This operation requires the -// secrets/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) PurgeDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedSecret") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedSecretSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure responding to request") - } - - return -} - -// PurgeDeletedSecretPreparer prepares the PurgeDeletedSecret request. -func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The -// operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval -// (available in the deleted certificate's attributes). This operation requires the certificates/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the deleted certificate -func (client BaseClient) RecoverDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure responding to request") - } - - return -} - -// RecoverDeletedCertificatePreparer prepares the RecoverDeletedCertificate request. -func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedKey the Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It -// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted key will -// return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. This operation -// requires the keys/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the deleted key. -func (client BaseClient) RecoverDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure responding to request") - } - - return -} - -// RecoverDeletedKeyPreparer prepares the RecoverDeletedKey request. -func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a -// soft-delete enabled vault. This operation requires the secrets/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the deleted secret. -func (client BaseClient) RecoverDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure responding to request") - } - - return -} - -// RecoverDeletedSecretPreparer prepares the RecoverDeletedSecret request. -func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateStorageAccountKey regenerates the specified key value for the given storage account. This operation -// requires the storage/regeneratekey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to regenerate storage account key. -func (client BaseClient) RegenerateStorageAccountKey(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RegenerateStorageAccountKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RegenerateStorageAccountKey", err.Error()) - } - - req, err := client.RegenerateStorageAccountKeyPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateStorageAccountKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure sending request") - return - } - - result, err = client.RegenerateStorageAccountKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure responding to request") - } - - return -} - -// RegenerateStorageAccountKeyPreparer prepares the RegenerateStorageAccountKey request. 
-func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/regeneratekey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier, -// attributes and access control policies. The RESTORE operation may be used to import a previously backed up key. -// Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it -// had when it was backed up. If the key name is not available in the target Key Vault, the RESTORE operation will be -// rejected. While the key name is retained during restore, the final key identifier will change if the key is restored -// to a different vault. Restore will restore all versions and preserve version identifiers. The RESTORE operation is -// subject to security constraints: The target Key Vault must be owned by the same Microsoft Azure Subscription as the -// source Key Vault The user must have RESTORE permission in the target Key Vault. This operation requires the -// keys/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the key. 
-func (client BaseClient) RestoreKey(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.KeyBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreKey", err.Error()) - } - - req, err := client.RestoreKeyPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure sending request") - return - } - - result, err = client.RestoreKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure responding to request") - } - - return -} - -// RestoreKeyPreparer prepares the RestoreKey request. -func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/keys/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreKeySender sends the RestoreKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RestoreKeyResponder handles the response to the RestoreKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RestoreKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RestoreSecret restores a backed up secret, and all its versions, to a vault. This operation requires the -// secrets/restore permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// parameters - the parameters to restore the secret. 
-func (client BaseClient) RestoreSecret(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SecretBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RestoreSecret", err.Error()) - } - - req, err := client.RestoreSecretPreparer(ctx, vaultBaseURL, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RestoreSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure sending request") - return - } - - result, err = client.RestoreSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure responding to request") - } - - return -} - -// RestoreSecretPreparer prepares the RestoreSecret request. -func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/secrets/restore"), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RestoreSecretSender sends the RestoreSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// RestoreSecretResponder handles the response to the RestoreSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the -// certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// contacts - the contacts for the key vault certificate. 
-func (client BaseClient) SetCertificateContacts(ctx context.Context, vaultBaseURL string, contacts Contacts) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.SetCertificateContactsPreparer(ctx, vaultBaseURL, contacts) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure responding to request") - } - - return -} - -// SetCertificateContactsPreparer prepares the SetCertificateContacts request. -func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string, contacts Contacts) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - contacts.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithJSON(contacts), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) SetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetCertificateIssuer the SetCertificateIssuer operation adds or updates the specified certificate issuer. This -// operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer set parameter. 
-func (client BaseClient) SetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameter, - Constraints: []validation.Constraint{{Target: "parameter.Provider", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetCertificateIssuer", err.Error()) - } - - req, err := client.SetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.SetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.SetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure responding to request") - } - - return -} - -// SetCertificateIssuerPreparer prepares the SetCertificateIssuer request. -func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) SetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSasDefinition creates or updates a new SAS definition for the specified storage account. 
This operation requires -// the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to create a SAS definition. -func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Parameters", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error()) - } - - req, err := client.SetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.SetSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.SetSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure responding to request") - } - - return -} - -// SetSasDefinitionPreparer prepares the SetSasDefinition request. -func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSasDefinitionSender sends the SetSasDefinition request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetSecret the SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key -// Vault creates a new version of that secret. This operation requires the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// parameters - the parameters for setting the secret. -func (client BaseClient) SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: secretName, - Constraints: []validation.Constraint{{Target: "secretName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetSecret", err.Error()) - } - - req, err := client.SetSecretPreparer(ctx, vaultBaseURL, secretName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", nil, "Failure preparing request") - return - } - - resp, err := client.SetSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure sending request") - return - } - - result, err = client.SetSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure responding to request") - } - - return -} - -// SetSecretPreparer prepares the SetSecret request. 
-func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetSecretSender sends the SetSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// SetSecretResponder handles the response to the SetSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) SetSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// SetStorageAccount creates or updates a new storage account. This operation requires the storage/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to create a storage account. 
-func (client BaseClient) SetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ResourceID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ActiveKeyName", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AutoRegenerateKey", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "SetStorageAccount", err.Error()) - } - - req, err := client.SetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.SetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.SetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure responding to request") - } - - return -} - -// SetStorageAccountPreparer prepares the SetStorageAccount request. -func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SetStorageAccountSender sends the SetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always -// closes the http.Response Body. 
-func (client BaseClient) SetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Sign the SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this -// operation uses the private portion of the key. This operation requires the keys/sign permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the signing operation. -func (client BaseClient) Sign(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Sign") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Sign", err.Error()) - } - - req, err := client.SignPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", nil, "Failure preparing request") - return - } - - resp, err := client.SignSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure sending request") - return - } - - result, err = client.SignResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure responding to request") - } - - return -} - -// SignPreparer prepares the Sign request. -func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/sign", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// SignSender sends the Sign request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
-} - -// SignResponder handles the response to the Sign request. The method always -// closes the http.Response Body. -func (client BaseClient) SignResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UnwrapKey the UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This -// operation is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and symmetric keys stored -// in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the key operation. -func (client BaseClient) UnwrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UnwrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UnwrapKey", err.Error()) - } - - req, err := client.UnwrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.UnwrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure sending request") - return - } - - result, err = client.UnwrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure responding to request") - } - - return -} - -// UnwrapKeyPreparer prepares the UnwrapKey request. -func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/unwrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UnwrapKeySender sends the UnwrapKey request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UnwrapKeyResponder handles the response to the UnwrapKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UnwrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificate the UpdateCertificate operation applies the specified update on the given certificate; the only -// elements updated are the certificate's attributes. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given key vault. -// certificateVersion - the version of the certificate. -// parameters - the parameters for certificate update. -func (client BaseClient) UpdateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure responding to request") - } - - return -} - -// UpdateCertificatePreparer prepares the UpdateCertificate request. 
-func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateSender sends the UpdateCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateIssuer the UpdateCertificateIssuer operation performs an update on the specified certificate issuer -// entity. This operation requires the certificates/setissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -// parameter - certificate issuer update parameter. 
-func (client BaseClient) UpdateCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure responding to request") - } - - return -} - -// UpdateCertificateIssuerPreparer prepares the UpdateCertificateIssuer request. -func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithJSON(parameter), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificateOperation updates a certificate creation operation that is already in progress. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// certificateOperation - the certificate operation response. 
-func (client BaseClient) UpdateCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificateOperationPreparer(ctx, vaultBaseURL, certificateName, certificateOperation) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure responding to request") - } - - return -} - -// UpdateCertificateOperationPreparer prepares the UpdateCertificateOperation request. -func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithJSON(certificateOperation), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificatePolicy set specified members in the certificate policy. Leave others as null. This operation -// requires the certificates/update permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificatePolicy - the policy for the certificate. -func (client BaseClient) UpdateCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName, certificatePolicy) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure responding to request") - } - - return -} - -// UpdateCertificatePolicyPreparer prepares the UpdateCertificatePolicy request. -func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - certificatePolicy.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithJSON(certificatePolicy), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateKey in order to perform this operation, the key must already exist in the Key Vault. 
Note: The cryptographic -// material of a key itself cannot be changed. This operation requires the keys/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of key to update. -// keyVersion - the version of the key to update. -// parameters - the parameters of the key to update. -func (client BaseClient) UpdateKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure sending request") - return - } - - result, err = client.UpdateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure responding to request") - } - - return -} - -// UpdateKeyPreparer prepares the UpdateKey request. -func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateKeySender sends the UpdateKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateKeyResponder handles the response to the UpdateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSasDefinition updates the specified attributes associated with the given SAS definition. This operation -// requires the storage/setsas permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to update a SAS definition. -func (client BaseClient) UpdateSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateSasDefinition", err.Error()) - } - - req, err := client.UpdateSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.UpdateSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure responding to request") - } - - return -} - -// UpdateSasDefinitionPreparer prepares the UpdateSasDefinition request. -func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSecret the UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not -// specified in the request are left unchanged. The value of a secret itself cannot be changed. This operation requires -// the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. -// parameters - the parameters for update secret operation. -func (client BaseClient) UpdateSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure sending request") - return - } - - result, err = client.UpdateSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure responding to request") - } - - return -} - -// UpdateSecretPreparer prepares the UpdateSecret request. 
-func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSecretSender sends the UpdateSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateSecretResponder handles the response to the UpdateSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateStorageAccount updates the specified attributes associated with the given storage account. This operation -// requires the storage/set/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to update a storage account. 
-func (client BaseClient) UpdateStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateStorageAccount", err.Error()) - } - - req, err := client.UpdateStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.UpdateStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure responding to request") - } - - return -} - -// UpdateStorageAccountPreparer prepares the UpdateStorageAccount request. -func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Verify the VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly -// necessary for asymmetric keys stored in Azure Key Vault since signature verification can be performed using the -// public portion of the key but this operation is supported as a convenience for callers that only have a -// key-reference and not the public portion of the key. This operation requires the keys/verify permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for verify operations. -func (client BaseClient) Verify(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (result KeyVerifyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Verify") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Digest", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Signature", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Verify", err.Error()) - } - - req, err := client.VerifyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", nil, "Failure preparing request") - return - } - - resp, err := client.VerifySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure sending request") - return - } - - result, err = client.VerifyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure responding to request") - } - - return -} - -// VerifyPreparer prepares the Verify request. -func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/verify", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// VerifySender sends the Verify request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// VerifyResponder handles the response to the Verify request. The method always -// closes the http.Response Body. -func (client BaseClient) VerifyResponder(resp *http.Response) (result KeyVerifyResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// WrapKey the WRAP operation supports encryption of a symmetric key using a key encryption key that has previously -// been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric keys stored in Azure -// Key Vault since protection with an asymmetric key can be performed using the public portion of the key. This -// operation is supported for asymmetric keys as a convenience for callers that have a key-reference but do not have -// access to the public key material. This operation requires the keys/wrapKey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for wrap operation. -func (client BaseClient) WrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.WrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "WrapKey", err.Error()) - } - - req, err := client.WrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.WrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure sending request") - return - } - - result, err = client.WrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure responding to request") - } - - return -} - -// WrapKeyPreparer prepares the WrapKey request. 
-func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "2016-10-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/wrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// WrapKeySender sends the WrapKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) -} - -// WrapKeyResponder handles the response to the WrapKey request. The method always -// closes the http.Response Body. -func (client BaseClient) WrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go deleted file mode 100644 index cbfec5797..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/models.go +++ /dev/null @@ -1,2883 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" - -// ActionType enumerates the values for action type. -type ActionType string - -const ( - // AutoRenew ... - AutoRenew ActionType = "AutoRenew" - // EmailContacts ... 
- EmailContacts ActionType = "EmailContacts" -) - -// PossibleActionTypeValues returns an array of possible values for the ActionType const type. -func PossibleActionTypeValues() []ActionType { - return []ActionType{AutoRenew, EmailContacts} -} - -// DeletionRecoveryLevel enumerates the values for deletion recovery level. -type DeletionRecoveryLevel string - -const ( - // Purgeable Soft-delete is not enabled for this vault. A DELETE operation results in immediate and - // irreversible data loss. - Purgeable DeletionRecoveryLevel = "Purgeable" - // Recoverable Soft-delete is enabled for this vault and purge has been disabled. A deleted entity will - // remain in this state until recovered, or the end of the retention interval. - Recoverable DeletionRecoveryLevel = "Recoverable" - // RecoverableProtectedSubscription Soft-delete is enabled for this vault, and the subscription is - // protected against immediate deletion. - RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" - // RecoverablePurgeable Soft-delete is enabled for this vault; A privileged user may trigger an immediate, - // irreversible deletion(purge) of a deleted entity. - RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" -) - -// PossibleDeletionRecoveryLevelValues returns an array of possible values for the DeletionRecoveryLevel const type. -func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { - return []DeletionRecoveryLevel{Purgeable, Recoverable, RecoverableProtectedSubscription, RecoverablePurgeable} -} - -// JSONWebKeyCurveName enumerates the values for json web key curve name. -type JSONWebKeyCurveName string - -const ( - // P256 The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. - P256 JSONWebKeyCurveName = "P-256" - // P384 The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. - P384 JSONWebKeyCurveName = "P-384" - // P521 The NIST P-521 elliptic curve, AKA SECG curve SECP521R1. - P521 JSONWebKeyCurveName = "P-521" - // SECP256K1 The SECG SECP256K1 elliptic curve. - SECP256K1 JSONWebKeyCurveName = "SECP256K1" -) - -// PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type. -func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { - return []JSONWebKeyCurveName{P256, P384, P521, SECP256K1} -} - -// JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. -type JSONWebKeyEncryptionAlgorithm string - -const ( - // RSA15 ... - RSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" - // RSAOAEP ... - RSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" - // RSAOAEP256 ... - RSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" -) - -// PossibleJSONWebKeyEncryptionAlgorithmValues returns an array of possible values for the JSONWebKeyEncryptionAlgorithm const type. -func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { - return []JSONWebKeyEncryptionAlgorithm{RSA15, RSAOAEP, RSAOAEP256} -} - -// JSONWebKeyOperation enumerates the values for json web key operation. -type JSONWebKeyOperation string - -const ( - // Decrypt ... - Decrypt JSONWebKeyOperation = "decrypt" - // Encrypt ... - Encrypt JSONWebKeyOperation = "encrypt" - // Sign ... - Sign JSONWebKeyOperation = "sign" - // UnwrapKey ... - UnwrapKey JSONWebKeyOperation = "unwrapKey" - // Verify ... - Verify JSONWebKeyOperation = "verify" - // WrapKey ... 
- WrapKey JSONWebKeyOperation = "wrapKey" -) - -// PossibleJSONWebKeyOperationValues returns an array of possible values for the JSONWebKeyOperation const type. -func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { - return []JSONWebKeyOperation{Decrypt, Encrypt, Sign, UnwrapKey, Verify, WrapKey} -} - -// JSONWebKeySignatureAlgorithm enumerates the values for json web key signature algorithm. -type JSONWebKeySignatureAlgorithm string - -const ( - // ECDSA256 ... - ECDSA256 JSONWebKeySignatureAlgorithm = "ECDSA256" - // ES256 ... - ES256 JSONWebKeySignatureAlgorithm = "ES256" - // ES384 ... - ES384 JSONWebKeySignatureAlgorithm = "ES384" - // ES512 ... - ES512 JSONWebKeySignatureAlgorithm = "ES512" - // PS256 ... - PS256 JSONWebKeySignatureAlgorithm = "PS256" - // PS384 ... - PS384 JSONWebKeySignatureAlgorithm = "PS384" - // PS512 ... - PS512 JSONWebKeySignatureAlgorithm = "PS512" - // RS256 ... - RS256 JSONWebKeySignatureAlgorithm = "RS256" - // RS384 ... - RS384 JSONWebKeySignatureAlgorithm = "RS384" - // RS512 ... - RS512 JSONWebKeySignatureAlgorithm = "RS512" - // RSNULL ... - RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" -) - -// PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. -func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { - return []JSONWebKeySignatureAlgorithm{ECDSA256, ES256, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} -} - -// JSONWebKeyType enumerates the values for json web key type. -type JSONWebKeyType string - -const ( - // EC ... - EC JSONWebKeyType = "EC" - // ECHSM ... - ECHSM JSONWebKeyType = "EC-HSM" - // Oct ... - Oct JSONWebKeyType = "oct" - // RSA ... - RSA JSONWebKeyType = "RSA" - // RSAHSM ... - RSAHSM JSONWebKeyType = "RSA-HSM" -) - -// PossibleJSONWebKeyTypeValues returns an array of possible values for the JSONWebKeyType const type. -func PossibleJSONWebKeyTypeValues() []JSONWebKeyType { - return []JSONWebKeyType{EC, ECHSM, Oct, RSA, RSAHSM} -} - -// KeyUsageType enumerates the values for key usage type. -type KeyUsageType string - -const ( - // CRLSign ... - CRLSign KeyUsageType = "cRLSign" - // DataEncipherment ... - DataEncipherment KeyUsageType = "dataEncipherment" - // DecipherOnly ... - DecipherOnly KeyUsageType = "decipherOnly" - // DigitalSignature ... - DigitalSignature KeyUsageType = "digitalSignature" - // EncipherOnly ... - EncipherOnly KeyUsageType = "encipherOnly" - // KeyAgreement ... - KeyAgreement KeyUsageType = "keyAgreement" - // KeyCertSign ... - KeyCertSign KeyUsageType = "keyCertSign" - // KeyEncipherment ... - KeyEncipherment KeyUsageType = "keyEncipherment" - // NonRepudiation ... - NonRepudiation KeyUsageType = "nonRepudiation" -) - -// PossibleKeyUsageTypeValues returns an array of possible values for the KeyUsageType const type. -func PossibleKeyUsageTypeValues() []KeyUsageType { - return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} -} - -// Action the action that will be executed. -type Action struct { - // ActionType - The type of the action. Possible values include: 'EmailContacts', 'AutoRenew' - ActionType ActionType `json:"action_type,omitempty"` -} - -// AdministratorDetails details of the organization administrator of the certificate issuer. -type AdministratorDetails struct { - // FirstName - First name. - FirstName *string `json:"first_name,omitempty"` - // LastName - Last name. 
- LastName *string `json:"last_name,omitempty"` - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Attributes the object attributes managed by the KeyVault service. -type Attributes struct { - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// BackupKeyResult the backup key result, containing the backup blob. -type BackupKeyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up key. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// BackupSecretResult the backup secret result, containing the backup blob. -type BackupSecretResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up secret. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// CertificateAttributes the certificate management attributes. -type CertificateAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// CertificateBundle a certificate bundle consists of a certificate (X509) plus its attributes. -type CertificateBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateBundle. 
-func (cb CertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cb.Cer != nil { - objectMap["cer"] = cb.Cer - } - if cb.ContentType != nil { - objectMap["contentType"] = cb.ContentType - } - if cb.Attributes != nil { - objectMap["attributes"] = cb.Attributes - } - if cb.Tags != nil { - objectMap["tags"] = cb.Tags - } - return json.Marshal(objectMap) -} - -// CertificateCreateParameters the certificate create parameters. -type CertificateCreateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateCreateParameters. -func (ccp CertificateCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ccp.CertificatePolicy != nil { - objectMap["policy"] = ccp.CertificatePolicy - } - if ccp.CertificateAttributes != nil { - objectMap["attributes"] = ccp.CertificateAttributes - } - if ccp.Tags != nil { - objectMap["tags"] = ccp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateImportParameters the certificate import parameters. -type CertificateImportParameters struct { - // Base64EncodedCertificate - Base64 encoded representation of the certificate object to import. This certificate needs to contain the private key. - Base64EncodedCertificate *string `json:"value,omitempty"` - // Password - If the private key in base64EncodedCertificate is encrypted, the password used for encryption. - Password *string `json:"pwd,omitempty"` - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateImportParameters. -func (cip CertificateImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cip.Base64EncodedCertificate != nil { - objectMap["value"] = cip.Base64EncodedCertificate - } - if cip.Password != nil { - objectMap["pwd"] = cip.Password - } - if cip.CertificatePolicy != nil { - objectMap["policy"] = cip.CertificatePolicy - } - if cip.CertificateAttributes != nil { - objectMap["attributes"] = cip.CertificateAttributes - } - if cip.Tags != nil { - objectMap["tags"] = cip.Tags - } - return json.Marshal(objectMap) -} - -// CertificateIssuerItem the certificate issuer item containing certificate issuer metadata. -type CertificateIssuerItem struct { - // ID - Certificate Identifier. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` -} - -// CertificateIssuerListResult the certificate issuer list result. -type CertificateIssuerListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificate issuers in the key vault along with a link to the next page of certificate issuers. 
- Value *[]CertificateIssuerItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificate issuers. - NextLink *string `json:"nextLink,omitempty"` -} - -// CertificateIssuerListResultIterator provides access to a complete listing of CertificateIssuerItem -// values. -type CertificateIssuerListResultIterator struct { - i int - page CertificateIssuerListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateIssuerListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateIssuerListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateIssuerListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateIssuerListResultIterator) Response() CertificateIssuerListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateIssuerListResultIterator) Value() CertificateIssuerItem { - if !iter.page.NotDone() { - return CertificateIssuerItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateIssuerListResultIterator type. -func NewCertificateIssuerListResultIterator(page CertificateIssuerListResultPage) CertificateIssuerListResultIterator { - return CertificateIssuerListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cilr CertificateIssuerListResult) IsEmpty() bool { - return cilr.Value == nil || len(*cilr.Value) == 0 -} - -// certificateIssuerListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cilr CertificateIssuerListResult) certificateIssuerListResultPreparer(ctx context.Context) (*http.Request, error) { - if cilr.NextLink == nil || len(to.String(cilr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cilr.NextLink))) -} - -// CertificateIssuerListResultPage contains a page of CertificateIssuerItem values. -type CertificateIssuerListResultPage struct { - fn func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error) - cilr CertificateIssuerListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *CertificateIssuerListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.cilr) - if err != nil { - return err - } - page.cilr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateIssuerListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateIssuerListResultPage) NotDone() bool { - return !page.cilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateIssuerListResultPage) Response() CertificateIssuerListResult { - return page.cilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateIssuerListResultPage) Values() []CertificateIssuerItem { - if page.cilr.IsEmpty() { - return nil - } - return *page.cilr.Value -} - -// Creates a new instance of the CertificateIssuerListResultPage type. -func NewCertificateIssuerListResultPage(getNextPage func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error)) CertificateIssuerListResultPage { - return CertificateIssuerListResultPage{fn: getNextPage} -} - -// CertificateIssuerSetParameters the certificate issuer set parameters. -type CertificateIssuerSetParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateIssuerUpdateParameters the certificate issuer update parameters. -type CertificateIssuerUpdateParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateItem the certificate item containing certificate metadata. -type CertificateItem struct { - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. 
(a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateItem. -func (ci CertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ci.ID != nil { - objectMap["id"] = ci.ID - } - if ci.Attributes != nil { - objectMap["attributes"] = ci.Attributes - } - if ci.Tags != nil { - objectMap["tags"] = ci.Tags - } - if ci.X509Thumbprint != nil { - objectMap["x5t"] = ci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// CertificateListResult the certificate list result. -type CertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificates in the key vault along with a link to the next page of certificates. - Value *[]CertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// CertificateListResultIterator provides access to a complete listing of CertificateItem values. -type CertificateListResultIterator struct { - i int - page CertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateListResultIterator) Response() CertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateListResultIterator) Value() CertificateItem { - if !iter.page.NotDone() { - return CertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateListResultIterator type. -func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { - return CertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (clr CertificateListResult) IsEmpty() bool { - return clr.Value == nil || len(*clr.Value) == 0 -} - -// certificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if clr.NextLink == nil || len(to.String(clr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(clr.NextLink))) -} - -// CertificateListResultPage contains a page of CertificateItem values. -type CertificateListResultPage struct { - fn func(context.Context, CertificateListResult) (CertificateListResult, error) - clr CertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.clr) - if err != nil { - return err - } - page.clr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateListResultPage) NotDone() bool { - return !page.clr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateListResultPage) Response() CertificateListResult { - return page.clr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateListResultPage) Values() []CertificateItem { - if page.clr.IsEmpty() { - return nil - } - return *page.clr.Value -} - -// Creates a new instance of the CertificateListResultPage type. -func NewCertificateListResultPage(getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage { - return CertificateListResultPage{fn: getNextPage} -} - -// CertificateMergeParameters the certificate merge parameters -type CertificateMergeParameters struct { - // X509Certificates - The certificate or the certificate chain to merge. - X509Certificates *[][]byte `json:"x5c,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateMergeParameters. -func (cmp CertificateMergeParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cmp.X509Certificates != nil { - objectMap["x5c"] = cmp.X509Certificates - } - if cmp.CertificateAttributes != nil { - objectMap["attributes"] = cmp.CertificateAttributes - } - if cmp.Tags != nil { - objectMap["tags"] = cmp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateOperation a certificate operation is returned in case of asynchronous requests. -type CertificateOperation struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. 
- ID *string `json:"id,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Csr - The certificate signing request (CSR) that is being used in the certificate operation. - Csr *[]byte `json:"csr,omitempty"` - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` - // Status - Status of the certificate operation. - Status *string `json:"status,omitempty"` - // StatusDetails - The status details of the certificate operation. - StatusDetails *string `json:"status_details,omitempty"` - // Error - Error encountered, if any, during the certificate operation. - Error *Error `json:"error,omitempty"` - // Target - Location which contains the result of the certificate operation. - Target *string `json:"target,omitempty"` - // RequestID - Identifier for the certificate operation. - RequestID *string `json:"request_id,omitempty"` -} - -// CertificateOperationUpdateParameter the certificate operation update parameters. -type CertificateOperationUpdateParameter struct { - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` -} - -// CertificatePolicy management policy for a certificate. -type CertificatePolicy struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // KeyProperties - Properties of the key backing a certificate. - KeyProperties *KeyProperties `json:"key_props,omitempty"` - // SecretProperties - Properties of the secret backing a certificate. - SecretProperties *SecretProperties `json:"secret_props,omitempty"` - // X509CertificateProperties - Properties of the X509 component of a certificate. - X509CertificateProperties *X509CertificateProperties `json:"x509_props,omitempty"` - // LifetimeActions - Actions that will be performed by Key Vault over the lifetime of a certificate. - LifetimeActions *[]LifetimeAction `json:"lifetime_actions,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` -} - -// CertificateUpdateParameters the certificate update parameters. -type CertificateUpdateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateUpdateParameters. -func (cup CertificateUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cup.CertificatePolicy != nil { - objectMap["policy"] = cup.CertificatePolicy - } - if cup.CertificateAttributes != nil { - objectMap["attributes"] = cup.CertificateAttributes - } - if cup.Tags != nil { - objectMap["tags"] = cup.Tags - } - return json.Marshal(objectMap) -} - -// Contact the contact information for the vault certificates. 
-type Contact struct { - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Name - Name. - Name *string `json:"name,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Contacts the contacts for the vault certificates. -type Contacts struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the contacts collection. - ID *string `json:"id,omitempty"` - // ContactList - The contact list for the vault certificates. - ContactList *[]Contact `json:"contacts,omitempty"` -} - -// DeletedCertificateBundle a Deleted Certificate consisting of its previous id, attributes and its tags, -// as well as information on when it will be purged. -type DeletedCertificateBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateBundle. -func (dcb DeletedCertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dcb.RecoveryID != nil { - objectMap["recoveryId"] = dcb.RecoveryID - } - if dcb.Cer != nil { - objectMap["cer"] = dcb.Cer - } - if dcb.ContentType != nil { - objectMap["contentType"] = dcb.ContentType - } - if dcb.Attributes != nil { - objectMap["attributes"] = dcb.Attributes - } - if dcb.Tags != nil { - objectMap["tags"] = dcb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedCertificateItem the deleted certificate item containing metadata about the deleted certificate. -type DeletedCertificateItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. 
- Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateItem. -func (dci DeletedCertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dci.RecoveryID != nil { - objectMap["recoveryId"] = dci.RecoveryID - } - if dci.ID != nil { - objectMap["id"] = dci.ID - } - if dci.Attributes != nil { - objectMap["attributes"] = dci.Attributes - } - if dci.Tags != nil { - objectMap["tags"] = dci.Tags - } - if dci.X509Thumbprint != nil { - objectMap["x5t"] = dci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// DeletedCertificateListResult a list of certificates that have been deleted in this vault. -type DeletedCertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted certificates in the vault along with a link to the next page of deleted certificates - Value *[]DeletedCertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// DeletedCertificateListResultIterator provides access to a complete listing of DeletedCertificateItem -// values. -type DeletedCertificateListResultIterator struct { - i int - page DeletedCertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedCertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedCertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedCertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedCertificateListResultIterator) Response() DeletedCertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedCertificateListResultIterator) Value() DeletedCertificateItem { - if !iter.page.NotDone() { - return DeletedCertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedCertificateListResultIterator type. 
-func NewDeletedCertificateListResultIterator(page DeletedCertificateListResultPage) DeletedCertificateListResultIterator { - return DeletedCertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dclr DeletedCertificateListResult) IsEmpty() bool { - return dclr.Value == nil || len(*dclr.Value) == 0 -} - -// deletedCertificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dclr DeletedCertificateListResult) deletedCertificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if dclr.NextLink == nil || len(to.String(dclr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dclr.NextLink))) -} - -// DeletedCertificateListResultPage contains a page of DeletedCertificateItem values. -type DeletedCertificateListResultPage struct { - fn func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) - dclr DeletedCertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedCertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.dclr) - if err != nil { - return err - } - page.dclr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedCertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedCertificateListResultPage) NotDone() bool { - return !page.dclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedCertificateListResultPage) Response() DeletedCertificateListResult { - return page.dclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedCertificateListResultPage) Values() []DeletedCertificateItem { - if page.dclr.IsEmpty() { - return nil - } - return *page.dclr.Value -} - -// Creates a new instance of the DeletedCertificateListResultPage type. -func NewDeletedCertificateListResultPage(getNextPage func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error)) DeletedCertificateListResultPage { - return DeletedCertificateListResultPage{fn: getNextPage} -} - -// DeletedKeyBundle a DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info -type DeletedKeyBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. 
- RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyBundle. -func (dkb DeletedKeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dkb.RecoveryID != nil { - objectMap["recoveryId"] = dkb.RecoveryID - } - if dkb.Key != nil { - objectMap["key"] = dkb.Key - } - if dkb.Attributes != nil { - objectMap["attributes"] = dkb.Attributes - } - if dkb.Tags != nil { - objectMap["tags"] = dkb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyItem the deleted key item containing the deleted key metadata and information about deletion. -type DeletedKeyItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyItem. -func (dki DeletedKeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dki.RecoveryID != nil { - objectMap["recoveryId"] = dki.RecoveryID - } - if dki.Kid != nil { - objectMap["kid"] = dki.Kid - } - if dki.Attributes != nil { - objectMap["attributes"] = dki.Attributes - } - if dki.Tags != nil { - objectMap["tags"] = dki.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyListResult a list of keys that have been deleted in this vault. -type DeletedKeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted keys - Value *[]DeletedKeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// DeletedKeyListResultIterator provides access to a complete listing of DeletedKeyItem values. -type DeletedKeyListResultIterator struct { - i int - page DeletedKeyListResultPage -} - -// NextWithContext advances to the next value. 
If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedKeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedKeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedKeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedKeyListResultIterator) Response() DeletedKeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedKeyListResultIterator) Value() DeletedKeyItem { - if !iter.page.NotDone() { - return DeletedKeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedKeyListResultIterator type. -func NewDeletedKeyListResultIterator(page DeletedKeyListResultPage) DeletedKeyListResultIterator { - return DeletedKeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dklr DeletedKeyListResult) IsEmpty() bool { - return dklr.Value == nil || len(*dklr.Value) == 0 -} - -// deletedKeyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dklr DeletedKeyListResult) deletedKeyListResultPreparer(ctx context.Context) (*http.Request, error) { - if dklr.NextLink == nil || len(to.String(dklr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dklr.NextLink))) -} - -// DeletedKeyListResultPage contains a page of DeletedKeyItem values. -type DeletedKeyListResultPage struct { - fn func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error) - dklr DeletedKeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedKeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.dklr) - if err != nil { - return err - } - page.dklr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-// Deprecated: Use NextWithContext() instead. -func (page *DeletedKeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedKeyListResultPage) NotDone() bool { - return !page.dklr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedKeyListResultPage) Response() DeletedKeyListResult { - return page.dklr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedKeyListResultPage) Values() []DeletedKeyItem { - if page.dklr.IsEmpty() { - return nil - } - return *page.dklr.Value -} - -// Creates a new instance of the DeletedKeyListResultPage type. -func NewDeletedKeyListResultPage(getNextPage func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error)) DeletedKeyListResultPage { - return DeletedKeyListResultPage{fn: getNextPage} -} - -// DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as -// information on when it will be purged. -type DeletedSecretBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretBundle. -func (dsb DeletedSecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - if dsb.Value != nil { - objectMap["value"] = dsb.Value - } - if dsb.ID != nil { - objectMap["id"] = dsb.ID - } - if dsb.ContentType != nil { - objectMap["contentType"] = dsb.ContentType - } - if dsb.Attributes != nil { - objectMap["attributes"] = dsb.Attributes - } - if dsb.Tags != nil { - objectMap["tags"] = dsb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedSecretItem the deleted secret item containing metadata about the deleted secret. -type DeletedSecretItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. 
- RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretItem. -func (dsi DeletedSecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsi.RecoveryID != nil { - objectMap["recoveryId"] = dsi.RecoveryID - } - if dsi.ID != nil { - objectMap["id"] = dsi.ID - } - if dsi.Attributes != nil { - objectMap["attributes"] = dsi.Attributes - } - if dsi.Tags != nil { - objectMap["tags"] = dsi.Tags - } - if dsi.ContentType != nil { - objectMap["contentType"] = dsi.ContentType - } - return json.Marshal(objectMap) -} - -// DeletedSecretListResult the deleted secret list result -type DeletedSecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page of deleted secrets - Value *[]DeletedSecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// DeletedSecretListResultIterator provides access to a complete listing of DeletedSecretItem values. -type DeletedSecretListResultIterator struct { - i int - page DeletedSecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedSecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter DeletedSecretListResultIterator) Response() DeletedSecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSecretListResultIterator) Value() DeletedSecretItem { - if !iter.page.NotDone() { - return DeletedSecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSecretListResultIterator type. -func NewDeletedSecretListResultIterator(page DeletedSecretListResultPage) DeletedSecretListResultIterator { - return DeletedSecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedSecretListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// deletedSecretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedSecretListResult) deletedSecretListResultPreparer(ctx context.Context) (*http.Request, error) { - if dslr.NextLink == nil || len(to.String(dslr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedSecretListResultPage contains a page of DeletedSecretItem values. -type DeletedSecretListResultPage struct { - fn func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error) - dslr DeletedSecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedSecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSecretListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSecretListResultPage) Response() DeletedSecretListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSecretListResultPage) Values() []DeletedSecretItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedSecretListResultPage type. -func NewDeletedSecretListResultPage(getNextPage func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error)) DeletedSecretListResultPage { - return DeletedSecretListResultPage{fn: getNextPage} -} - -// Error the key vault server error. -type Error struct { - // Code - READ-ONLY; The error code. 
- Code *string `json:"code,omitempty"` - // Message - READ-ONLY; The error message. - Message *string `json:"message,omitempty"` - // InnerError - READ-ONLY - InnerError *Error `json:"innererror,omitempty"` -} - -// ErrorType the key vault error exception. -type ErrorType struct { - // Error - READ-ONLY - Error *Error `json:"error,omitempty"` -} - -// IssuerAttributes the attributes of an issuer managed by the Key Vault service. -type IssuerAttributes struct { - // Enabled - Determines whether the issuer is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// IssuerBundle the issuer for Key Vault certificate. -type IssuerBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the issuer object. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// IssuerCredentials the credentials to be used for the certificate issuer. -type IssuerCredentials struct { - // AccountID - The user name/account name/account id. - AccountID *string `json:"account_id,omitempty"` - // Password - The password/secret/account key. - Password *string `json:"pwd,omitempty"` -} - -// IssuerParameters parameters for the issuer of the X509 component of a certificate. -type IssuerParameters struct { - // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. - Name *string `json:"name,omitempty"` - // CertificateType - Type of certificate to be requested from the issuer provider. - CertificateType *string `json:"cty,omitempty"` -} - -// JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 -type JSONWebKey struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Kty - JsonWebKey key type (kty). Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - KeyOps *[]string `json:"key_ops,omitempty"` - // N - RSA modulus. (a URL-encoded base64 string) - N *string `json:"n,omitempty"` - // E - RSA public exponent. (a URL-encoded base64 string) - E *string `json:"e,omitempty"` - // D - RSA private exponent, or the D component of an EC private key. (a URL-encoded base64 string) - D *string `json:"d,omitempty"` - // DP - RSA private key parameter. (a URL-encoded base64 string) - DP *string `json:"dp,omitempty"` - // DQ - RSA private key parameter. (a URL-encoded base64 string) - DQ *string `json:"dq,omitempty"` - // QI - RSA private key parameter. (a URL-encoded base64 string) - QI *string `json:"qi,omitempty"` - // P - RSA secret prime. (a URL-encoded base64 string) - P *string `json:"p,omitempty"` - // Q - RSA secret prime, with p < q. (a URL-encoded base64 string) - Q *string `json:"q,omitempty"` - // K - Symmetric key. (a URL-encoded base64 string) - K *string `json:"k,omitempty"` - // T - HSM Token, used with 'Bring Your Own Key'. 
(a URL-encoded base64 string) - T *string `json:"key_hsm,omitempty"` - // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'SECP256K1' - Crv JSONWebKeyCurveName `json:"crv,omitempty"` - // X - X component of an EC public key. (a URL-encoded base64 string) - X *string `json:"x,omitempty"` - // Y - Y component of an EC public key. (a URL-encoded base64 string) - Y *string `json:"y,omitempty"` -} - -// KeyAttributes the attributes of a key managed by the key vault service. -type KeyAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system can purge the key, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// KeyBundle a KeyBundle consisting of a WebKey plus its attributes. -type KeyBundle struct { - autorest.Response `json:"-"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyBundle. -func (kb KeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kb.Key != nil { - objectMap["key"] = kb.Key - } - if kb.Attributes != nil { - objectMap["attributes"] = kb.Attributes - } - if kb.Tags != nil { - objectMap["tags"] = kb.Tags - } - return json.Marshal(objectMap) -} - -// KeyCreateParameters the key create parameters. -type KeyCreateParameters struct { - // Kty - The type of key to create. For valid values, see JsonWebKeyType. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'SECP256K1' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyCreateParameters. 
-func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kcp.Kty != "" { - objectMap["kty"] = kcp.Kty - } - if kcp.KeySize != nil { - objectMap["key_size"] = kcp.KeySize - } - if kcp.KeyOps != nil { - objectMap["key_ops"] = kcp.KeyOps - } - if kcp.KeyAttributes != nil { - objectMap["attributes"] = kcp.KeyAttributes - } - if kcp.Tags != nil { - objectMap["tags"] = kcp.Tags - } - if kcp.Curve != "" { - objectMap["crv"] = kcp.Curve - } - return json.Marshal(objectMap) -} - -// KeyImportParameters the key import parameters. -type KeyImportParameters struct { - // Hsm - Whether to import as a hardware key (HSM) or software key. - Hsm *bool `json:"Hsm,omitempty"` - // Key - The Json web key - Key *JSONWebKey `json:"key,omitempty"` - // KeyAttributes - The key management attributes. - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyImportParameters. -func (kip KeyImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kip.Hsm != nil { - objectMap["Hsm"] = kip.Hsm - } - if kip.Key != nil { - objectMap["key"] = kip.Key - } - if kip.KeyAttributes != nil { - objectMap["attributes"] = kip.KeyAttributes - } - if kip.Tags != nil { - objectMap["tags"] = kip.Tags - } - return json.Marshal(objectMap) -} - -// KeyItem the key item containing key metadata. -type KeyItem struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyItem. -func (ki KeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ki.Kid != nil { - objectMap["kid"] = ki.Kid - } - if ki.Attributes != nil { - objectMap["attributes"] = ki.Attributes - } - if ki.Tags != nil { - objectMap["tags"] = ki.Tags - } - return json.Marshal(objectMap) -} - -// KeyListResult the key list result. -type KeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. - Value *[]KeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// KeyListResultIterator provides access to a complete listing of KeyItem values. -type KeyListResultIterator struct { - i int - page KeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *KeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *KeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter KeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter KeyListResultIterator) Response() KeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter KeyListResultIterator) Value() KeyItem { - if !iter.page.NotDone() { - return KeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the KeyListResultIterator type. -func NewKeyListResultIterator(page KeyListResultPage) KeyListResultIterator { - return KeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (klr KeyListResult) IsEmpty() bool { - return klr.Value == nil || len(*klr.Value) == 0 -} - -// keyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (klr KeyListResult) keyListResultPreparer(ctx context.Context) (*http.Request, error) { - if klr.NextLink == nil || len(to.String(klr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(klr.NextLink))) -} - -// KeyListResultPage contains a page of KeyItem values. -type KeyListResultPage struct { - fn func(context.Context, KeyListResult) (KeyListResult, error) - klr KeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *KeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.klr) - if err != nil { - return err - } - page.klr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *KeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. 
-func (page KeyListResultPage) NotDone() bool { - return !page.klr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page KeyListResultPage) Response() KeyListResult { - return page.klr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page KeyListResultPage) Values() []KeyItem { - if page.klr.IsEmpty() { - return nil - } - return *page.klr.Value -} - -// Creates a new instance of the KeyListResultPage type. -func NewKeyListResultPage(getNextPage func(context.Context, KeyListResult) (KeyListResult, error)) KeyListResultPage { - return KeyListResultPage{fn: getNextPage} -} - -// KeyOperationResult the key operation result. -type KeyOperationResult struct { - autorest.Response `json:"-"` - // Kid - READ-ONLY; Key identifier - Kid *string `json:"kid,omitempty"` - // Result - READ-ONLY; a URL-encoded base64 string - Result *string `json:"value,omitempty"` -} - -// KeyOperationsParameters the key operations parameters. -type KeyOperationsParameters struct { - // Algorithm - algorithm identifier. Possible values include: 'RSAOAEP', 'RSAOAEP256', 'RSA15' - Algorithm JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyProperties properties of the key pair backing a certificate. -type KeyProperties struct { - // Exportable - Indicates if the private key can be exported. - Exportable *bool `json:"exportable,omitempty"` - // KeyType - The key type. - KeyType *string `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - // ReuseKey - Indicates if the same key pair will be used on certificate renewal. - ReuseKey *bool `json:"reuse_key,omitempty"` -} - -// KeyRestoreParameters the key restore parameters. -type KeyRestoreParameters struct { - // KeyBundleBackup - The backup blob associated with a key bundle. (a URL-encoded base64 string) - KeyBundleBackup *string `json:"value,omitempty"` -} - -// KeySignParameters the key operations parameters. -type KeySignParameters struct { - // Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ECDSA256' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Value - a URL-encoded base64 string - Value *string `json:"value,omitempty"` -} - -// KeyUpdateParameters the key update parameters. -type KeyUpdateParameters struct { - // KeyOps - Json web key operations. For more information on possible key operations, see JsonWebKeyOperation. - KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyUpdateParameters. -func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kup.KeyOps != nil { - objectMap["key_ops"] = kup.KeyOps - } - if kup.KeyAttributes != nil { - objectMap["attributes"] = kup.KeyAttributes - } - if kup.Tags != nil { - objectMap["tags"] = kup.Tags - } - return json.Marshal(objectMap) -} - -// KeyVerifyParameters the key verify parameters. 
-type KeyVerifyParameters struct { - // Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ECDSA256' - Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"` - // Digest - The digest used for signing. (a URL-encoded base64 string) - Digest *string `json:"digest,omitempty"` - // Signature - The signature to be verified. (a URL-encoded base64 string) - Signature *string `json:"value,omitempty"` -} - -// KeyVerifyResult the key verify result. -type KeyVerifyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; True if the signature is verified, otherwise false. - Value *bool `json:"value,omitempty"` -} - -// LifetimeAction action and its trigger that will be performed by Key Vault over the lifetime of a -// certificate. -type LifetimeAction struct { - // Trigger - The condition that will execute the action. - Trigger *Trigger `json:"trigger,omitempty"` - // Action - The action that will be executed. - Action *Action `json:"action,omitempty"` -} - -// OrganizationDetails details of the organization of the certificate issuer. -type OrganizationDetails struct { - // ID - Id of the organization. - ID *string `json:"id,omitempty"` - // AdminDetails - Details of the organization administrator. - AdminDetails *[]AdministratorDetails `json:"admin_details,omitempty"` -} - -// PendingCertificateSigningRequestResult the pending certificate signing request result. -type PendingCertificateSigningRequestResult struct { - // Value - READ-ONLY; The pending certificate signing request as Base64 encoded string. - Value *string `json:"value,omitempty"` -} - -// SasDefinitionAttributes the SAS definition management attributes. -type SasDefinitionAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its -// attributes. -type SasDefinitionBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Parameters - READ-ONLY; The SAS definition metadata in the form of key-value pairs. - Parameters map[string]*string `json:"parameters"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionBundle. -func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionCreateParameters the SAS definition create parameters. -type SasDefinitionCreateParameters struct { - // Parameters - Sas definition creation metadata in the form of key-value pairs. - Parameters map[string]*string `json:"parameters"` - // SasDefinitionAttributes - The attributes of the SAS definition. 
- SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. -func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdcp.Parameters != nil { - objectMap["parameters"] = sdcp.Parameters - } - if sdcp.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdcp.SasDefinitionAttributes - } - if sdcp.Tags != nil { - objectMap["tags"] = sdcp.Tags - } - return json.Marshal(objectMap) -} - -// SasDefinitionItem the SAS definition item containing storage SAS definition metadata. -type SasDefinitionItem struct { - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionItem. -func (sdi SasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResult the storage account SAS definition list result. -type SasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of SAS definitions along with a link to the next page of SAS definitions. - Value *[]SasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// SasDefinitionListResultIterator provides access to a complete listing of SasDefinitionItem values. -type SasDefinitionListResultIterator struct { - i int - page SasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. 
-func (iter SasDefinitionListResultIterator) Response() SasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SasDefinitionListResultIterator) Value() SasDefinitionItem { - if !iter.page.NotDone() { - return SasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SasDefinitionListResultIterator type. -func NewSasDefinitionListResultIterator(page SasDefinitionListResultPage) SasDefinitionListResultIterator { - return SasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sdlr SasDefinitionListResult) IsEmpty() bool { - return sdlr.Value == nil || len(*sdlr.Value) == 0 -} - -// sasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (sdlr SasDefinitionListResult) sasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if sdlr.NextLink == nil || len(to.String(sdlr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sdlr.NextLink))) -} - -// SasDefinitionListResultPage contains a page of SasDefinitionItem values. -type SasDefinitionListResultPage struct { - fn func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error) - sdlr SasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.sdlr) - if err != nil { - return err - } - page.sdlr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SasDefinitionListResultPage) NotDone() bool { - return !page.sdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SasDefinitionListResultPage) Response() SasDefinitionListResult { - return page.sdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SasDefinitionListResultPage) Values() []SasDefinitionItem { - if page.sdlr.IsEmpty() { - return nil - } - return *page.sdlr.Value -} - -// Creates a new instance of the SasDefinitionListResultPage type. -func NewSasDefinitionListResultPage(getNextPage func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error)) SasDefinitionListResultPage { - return SasDefinitionListResultPage{fn: getNextPage} -} - -// SasDefinitionUpdateParameters the SAS definition update parameters. 
-type SasDefinitionUpdateParameters struct { - // Parameters - Sas definition update metadata in the form of key-value pairs. - Parameters map[string]*string `json:"parameters"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. -func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdup.Parameters != nil { - objectMap["parameters"] = sdup.Parameters - } - if sdup.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdup.SasDefinitionAttributes - } - if sdup.Tags != nil { - objectMap["tags"] = sdup.Tags - } - return json.Marshal(objectMap) -} - -// SecretAttributes the secret management attributes. -type SecretAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the system can purge the secret, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// SecretBundle a secret consisting of a value, id and its attributes. -type SecretBundle struct { - autorest.Response `json:"-"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretBundle. -func (sb SecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sb.Value != nil { - objectMap["value"] = sb.Value - } - if sb.ID != nil { - objectMap["id"] = sb.ID - } - if sb.ContentType != nil { - objectMap["contentType"] = sb.ContentType - } - if sb.Attributes != nil { - objectMap["attributes"] = sb.Attributes - } - if sb.Tags != nil { - objectMap["tags"] = sb.Tags - } - return json.Marshal(objectMap) -} - -// SecretItem the secret item containing secret metadata. 
-type SecretItem struct { - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretItem. -func (si SecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if si.ID != nil { - objectMap["id"] = si.ID - } - if si.Attributes != nil { - objectMap["attributes"] = si.Attributes - } - if si.Tags != nil { - objectMap["tags"] = si.Tags - } - if si.ContentType != nil { - objectMap["contentType"] = si.ContentType - } - return json.Marshal(objectMap) -} - -// SecretListResult the secret list result. -type SecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. - Value *[]SecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// SecretListResultIterator provides access to a complete listing of SecretItem values. -type SecretListResultIterator struct { - i int - page SecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SecretListResultIterator) Response() SecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SecretListResultIterator) Value() SecretItem { - if !iter.page.NotDone() { - return SecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SecretListResultIterator type. 
-func NewSecretListResultIterator(page SecretListResultPage) SecretListResultIterator { - return SecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr SecretListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// secretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr SecretListResult) secretListResultPreparer(ctx context.Context) (*http.Request, error) { - if slr.NextLink == nil || len(to.String(slr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// SecretListResultPage contains a page of SecretItem values. -type SecretListResultPage struct { - fn func(context.Context, SecretListResult) (SecretListResult, error) - slr SecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SecretListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SecretListResultPage) Response() SecretListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SecretListResultPage) Values() []SecretItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the SecretListResultPage type. -func NewSecretListResultPage(getNextPage func(context.Context, SecretListResult) (SecretListResult, error)) SecretListResultPage { - return SecretListResultPage{fn: getNextPage} -} - -// SecretProperties properties of the key backing a certificate. -type SecretProperties struct { - // ContentType - The media type (MIME type). - ContentType *string `json:"contentType,omitempty"` -} - -// SecretRestoreParameters the secret restore parameters. -type SecretRestoreParameters struct { - // SecretBundleBackup - The backup blob associated with a secret bundle. (a URL-encoded base64 string) - SecretBundleBackup *string `json:"value,omitempty"` -} - -// SecretSetParameters the secret set parameters. -type SecretSetParameters struct { - // Value - The value of the secret. - Value *string `json:"value,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. 
- ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. - SecretAttributes *SecretAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretSetParameters. -func (ssp SecretSetParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ssp.Value != nil { - objectMap["value"] = ssp.Value - } - if ssp.Tags != nil { - objectMap["tags"] = ssp.Tags - } - if ssp.ContentType != nil { - objectMap["contentType"] = ssp.ContentType - } - if ssp.SecretAttributes != nil { - objectMap["attributes"] = ssp.SecretAttributes - } - return json.Marshal(objectMap) -} - -// SecretUpdateParameters the secret update parameters. -type SecretUpdateParameters struct { - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // SecretAttributes - The secret management attributes. - SecretAttributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SecretUpdateParameters. -func (sup SecretUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sup.ContentType != nil { - objectMap["contentType"] = sup.ContentType - } - if sup.SecretAttributes != nil { - objectMap["attributes"] = sup.SecretAttributes - } - if sup.Tags != nil { - objectMap["tags"] = sup.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountAttributes the storage account management attributes. -type StorageAccountAttributes struct { - // Enabled - the enabled state of the object. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// StorageAccountCreateParameters the storage account create parameters. -type StorageAccountCreateParameters struct { - // ResourceID - Storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - Current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountCreateParameters. 
-func (sacp StorageAccountCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sacp.ResourceID != nil { - objectMap["resourceId"] = sacp.ResourceID - } - if sacp.ActiveKeyName != nil { - objectMap["activeKeyName"] = sacp.ActiveKeyName - } - if sacp.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = sacp.AutoRegenerateKey - } - if sacp.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = sacp.RegenerationPeriod - } - if sacp.StorageAccountAttributes != nil { - objectMap["attributes"] = sacp.StorageAccountAttributes - } - if sacp.Tags != nil { - objectMap["tags"] = sacp.Tags - } - return json.Marshal(objectMap) -} - -// StorageAccountItem the storage account item containing storage account metadata. -type StorageAccountItem struct { - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountItem. -func (sai StorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageAccountRegenerteKeyParameters the storage account key regenerate parameters. -type StorageAccountRegenerteKeyParameters struct { - // KeyName - The storage account key name. - KeyName *string `json:"keyName,omitempty"` -} - -// StorageAccountUpdateParameters the storage account update parameters. -type StorageAccountUpdateParameters struct { - // ActiveKeyName - The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // StorageAccountAttributes - The attributes of the storage account. - StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageAccountUpdateParameters. -func (saup StorageAccountUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if saup.ActiveKeyName != nil { - objectMap["activeKeyName"] = saup.ActiveKeyName - } - if saup.AutoRegenerateKey != nil { - objectMap["autoRegenerateKey"] = saup.AutoRegenerateKey - } - if saup.RegenerationPeriod != nil { - objectMap["regenerationPeriod"] = saup.RegenerationPeriod - } - if saup.StorageAccountAttributes != nil { - objectMap["attributes"] = saup.StorageAccountAttributes - } - if saup.Tags != nil { - objectMap["tags"] = saup.Tags - } - return json.Marshal(objectMap) -} - -// StorageBundle a Storage account bundle consists of key vault storage account details plus its -// attributes. -type StorageBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. 
- ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for StorageBundle. -func (sb StorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// StorageListResult the storage accounts list result. -type StorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of storage accounts in the key vault along with a link to the next page of storage accounts. - Value *[]StorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// StorageListResultIterator provides access to a complete listing of StorageAccountItem values. -type StorageListResultIterator struct { - i int - page StorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *StorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *StorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter StorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter StorageListResultIterator) Response() StorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter StorageListResultIterator) Value() StorageAccountItem { - if !iter.page.NotDone() { - return StorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the StorageListResultIterator type. 
-func NewStorageListResultIterator(page StorageListResultPage) StorageListResultIterator { - return StorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr StorageListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// storageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr StorageListResult) storageListResultPreparer(ctx context.Context) (*http.Request, error) { - if slr.NextLink == nil || len(to.String(slr.NextLink)) < 1 { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// StorageListResultPage contains a page of StorageAccountItem values. -type StorageListResultPage struct { - fn func(context.Context, StorageListResult) (StorageListResult, error) - slr StorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *StorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageListResultPage) Response() StorageListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageListResultPage) Values() []StorageAccountItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the StorageListResultPage type. -func NewStorageListResultPage(getNextPage func(context.Context, StorageListResult) (StorageListResult, error)) StorageListResultPage { - return StorageListResultPage{fn: getNextPage} -} - -// SubjectAlternativeNames the subject alternate names of a X509 object. -type SubjectAlternativeNames struct { - // Emails - Email addresses. - Emails *[]string `json:"emails,omitempty"` - // DNSNames - Domain names. - DNSNames *[]string `json:"dns_names,omitempty"` - // Upns - User principal names. - Upns *[]string `json:"upns,omitempty"` -} - -// Trigger a condition to be satisfied for an action to be executed. -type Trigger struct { - // LifetimePercentage - Percentage of lifetime at which to trigger. Value should be between 1 and 99. - LifetimePercentage *int32 `json:"lifetime_percentage,omitempty"` - // DaysBeforeExpiry - Days before expiry to attempt renewal. Value should be between 1 and validity_in_months multiplied by 27. 
If validity_in_months is 36, then value should be between 1 and 972 (36 * 27). - DaysBeforeExpiry *int32 `json:"days_before_expiry,omitempty"` -} - -// X509CertificateProperties properties of the X509 component of a certificate. -type X509CertificateProperties struct { - // Subject - The subject name. Should be a valid X509 distinguished Name. - Subject *string `json:"subject,omitempty"` - // Ekus - The enhanced key usage. - Ekus *[]string `json:"ekus,omitempty"` - // SubjectAlternativeNames - The subject alternative names. - SubjectAlternativeNames *SubjectAlternativeNames `json:"sans,omitempty"` - // KeyUsage - List of key usages. - KeyUsage *[]KeyUsageType `json:"key_usage,omitempty"` - // ValidityInMonths - The duration that the certificate is valid in months. - ValidityInMonths *int32 `json:"validity_months,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go deleted file mode 100644 index fb6a25c73..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/version.go +++ /dev/null @@ -1,30 +0,0 @@ -package keyvault - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " keyvault/2016-10-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index fec416a9c..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# Azure Active Directory authentication for Go - -This is a standalone package for authenticating with Azure Active -Directory from other Go libraries and applications, in particular the [Azure SDK -for Go](https://github.com/Azure/azure-sdk-for-go). - -Note: Despite the package's name it is not related to other "ADAL" libraries -maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues -should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) -or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue -trackers. 
- -## Install - -```bash -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate containing also the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content form `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4. - - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending of your needs. - -``` -az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. -* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. 
- - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - *oauthConfig, - appliationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - *oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - *oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Username password authenticate - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - *oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Authorization code authenticate - -``` Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - *oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) - -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
- -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFC application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oath token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example acquire a token for `https://management.core.windows.net/` using device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index fa5964742..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,151 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "fmt" - "net/url" -) - -const ( - activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '" + name + "' cannot be empty") - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant specific urls -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. -// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
-func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} - -// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs. -type MultiTenantOAuthConfig interface { - PrimaryTenant() *OAuthConfig - AuxiliaryTenants() []*OAuthConfig -} - -// OAuthOptions contains optional OAuthConfig creation arguments. -type OAuthOptions struct { - APIVersion string -} - -func (c OAuthOptions) apiVersion() string { - if c.APIVersion != "" { - return fmt.Sprintf("?api-version=%s", c.APIVersion) - } - return "1.0" -} - -// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information. 
-func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { - if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { - return nil, errors.New("must specify one to three auxiliary tenants") - } - mtCfg := multiTenantOAuthConfig{ - cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), - } - apiVer := options.apiVersion() - pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) - } - mtCfg.cfgs[0] = pri - for i := range auxiliaryTenantIDs { - aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) - if err != nil { - return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) - } - mtCfg.cfgs[i+1] = aux - } - return mtCfg, nil -} - -type multiTenantOAuthConfig struct { - // first config in the slice is the primary tenant - cfgs []*OAuthConfig -} - -func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { - return m.cfgs[0] -} - -func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { - return m.cfgs[1:] -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index 914f8af5e..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,269 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
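The config.go removed above is the piece that built adal's v1 endpoint set (`…/oauth2/{authorize,token,devicecode}?api-version=1.0`). As review context only, a minimal, illustrative Go sketch of how callers typically drove it; the `login.microsoftonline.com` authority and the placeholder tenant ID are assumptions for the example, not values taken from autograph or sops:

```Go
package main

import (
	"fmt"

	// Old module path being removed by this upgrade; shown only to
	// illustrate the API that is deleted here.
	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// NewOAuthConfig appends api-version=1.0 and derives the tenant-specific
	// authorize/token/devicecode endpoints from the authority URL.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}
	// Prints https://login.microsoftonline.com/TENANT_ID/oauth2/token?api-version=1.0
	fmt.Println(cfg.TokenEndpoint.String())
}
```

NewMultiTenantOAuthConfig, also deleted here, layered the same constructor over a primary tenant plus one to three auxiliary tenants.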
- -/* - This file is largely based on rjw57/oauth2device's code, with the follow differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object return by the token exchange 
endpoint -// It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -// Deprecated: use InitiateDeviceAuthWithContext() instead. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) -} - -// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -// Deprecated: use CheckForUserCompletionWithContext() instead. 
-func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return CheckForUserCompletionWithContext(context.Background(), sender, code) -} - -// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req.WithContext(ctx)) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -// Deprecated: use WaitForUserCompletionWithContext() instead. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - return WaitForUserCompletionWithContext(context.Background(), sender, code) -} - -// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error -// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletionWithContext(ctx, sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. 
Server told us to slow_down too much", logPrefix) - } - - select { - case <-time.After(waitDuration): - // noop - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go deleted file mode 100644 index 28a4bfc4c..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f2751..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. 
-func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index d7e4372bb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,95 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "crypto/tls" - "net/http" - "net/http/cookiejar" - "sync" - - "github.com/Azure/go-autorest/tracing" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -var defaultSender Sender -var defaultSenderInit = &sync.Once{} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. 
Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -func sender() Sender { - // note that we can't init defaultSender in init() since it will - // execute before calling code has had a chance to enable tracing - defaultSenderInit.Do(func() { - // Use behaviour compatible with DefaultTransport, but require TLS minimum version. - defaultTransport := http.DefaultTransport.(*http.Transport) - transport := &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - defaultSender = &http.Client{Jar: j, Transport: roundTripper} - }) - return defaultSender -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index 7c7fca371..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,1112 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
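The sender.go deleted just above defines the small Sender/SendDecorator plumbing the rest of adal is built on: anything with a `Do(*http.Request) (*http.Response, error)` method is a Sender, and DecorateSender wraps one in a chain of SendDecorators. As an illustration of that (now removed) API only, a short sketch; the `withHeader` helper, its header values, and the package name are hypothetical:

```Go
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal" // old path removed by this upgrade
)

// withHeader is a hypothetical SendDecorator: it wraps a Sender so that every
// outgoing request carries the given header before reaching the wrapped Sender.
func withHeader(key, value string) adal.SendDecorator {
	return func(s adal.Sender) adal.Sender {
		return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
			r.Header.Set(key, value)
			return s.Do(r)
		})
	}
}

// newSender decorates a plain *http.Client (which already satisfies Sender)
// with the hypothetical decorator above.
func newSender() adal.Sender {
	return adal.DecorateSender(&http.Client{}, withHeader("X-Example", "1"))
}
```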
- -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well known endpoint for getting MSI authentications tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 - - // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions - asMSIEndpointEnv = "MSI_ENDPOINT" - - // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions - asMSISecretEnv = "MSI_SECRET" -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. -type MultitenantOAuthTokenProvider interface { - PrimaryOAuthToken() string - AuxiliaryOAuthTokens() []string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. -type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. 
-// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. -func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - secret := raw["secret"].(map[string]interface{}) - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - // Don't override the refreshLock or the sender if those have been already set. - if spt.refreshLock == nil { - spt.refreshLock = &sync.RWMutex{} - } - if spt.sender == nil { - spt.sender = sender() - } - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -func isAppService() bool { - _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) - _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv) - - return asMSIEndpointEnvExists && asMSISecretEnvExists -} - -// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions -func GetMSIAppServiceEndpoint() (string, error) { - asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) - - if asMSIEndpointEnvExists { - return asMSIEndpoint, nil - } - return "", errors.New("MSI endpoint not found") -} - -// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment -func GetMSIEndpoint() (string, error) { - if isAppService() { - return GetMSIAppServiceEndpoint() - } - return GetMSIVMEndpoint() -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the specified user assigned identity when creating the token. 
-func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) -} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != nil { - if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - } - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("resource", resource) - // App Service MSI currently only supports token API version 2017-09-01 - if isAppService() { - v.Set("api-version", "2017-09-01") - } else { - v.Set("api-version", "2018-02-01") - } - if userAssignedID != nil { - v.Set("client_id", *userAssignedID) - } - msiEndpointURL.RawQuery = v.Encode() - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{}, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: sender(), - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - if userAssignedID != nil { - spt.inner.ClientID = *userAssignedID - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
-func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) - if err != nil { - return false - } - return (u.Host == imds.Host && u.Path == imds.Path) || isAppService() -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - // Add header when runtime is on App Service or Functions - if isAppService() { - asMSISecret, _ := os.LookupEnv(asMSISecretEnv) - req.Header.Add("Secret", asMSISecret) - } - req = req.WithContext(ctx) - if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - } - - if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - req.Method = http.MethodGet - req.Header.Set(metadataHeader, "true") - } - - var resp *http.Response - if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - resp, err = spt.sender.Do(req) - } - if err != nil { - // don't return a TokenRefreshError here; this will allow retry logic to apply - return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) - } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. 
Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.inner.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - for attempt < maxAttempts { - resp, err = sender.Do(req) - // we want to retry if err is not nil or the status code is in the list of retry codes - if err == nil && !responseHasStatusCode(resp, retries...) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -func responseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp != nil { - for _, i := range codes { - if i == resp.StatusCode { - return true - } - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} - -// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. -type MultiTenantServicePrincipalToken struct { - PrimaryToken *ServicePrincipalToken - AuxiliaryTokens []*ServicePrincipalToken -} - -// PrimaryOAuthToken returns the primary authorization token. 
-func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { - return mt.PrimaryToken.OAuthToken() -} - -// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. -func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { - tokens := make([]string, len(mt.AuxiliaryTokens)) - for i := range mt.AuxiliaryTokens { - tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() - } - return tokens -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.EnsureFreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshWithContext(ctx); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh primary token: %v", err) - } - for _, aux := range mt.AuxiliaryTokens { - if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { - return fmt.Errorf("failed to refresh auxiliary token: %v", err) - } - } - return nil -} - -// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
-func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - auxTenants := multiTenantCfg.AuxiliaryTenants() - m := MultiTenantServicePrincipalToken{ - AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), - } - primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) - } - m.PrimaryToken = primary - for i := range auxTenants { - aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) - if err != nil { - return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) - } - m.AuxiliaryTokens[i] = aux - } - return &m, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b3484..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. -func UserAgent() string { - return ua -} - -// AddToUserAgent adds an extension to the current user agent -func AddToUserAgent(extension string) error { - if extension != "" { - ua = fmt.Sprintf("%s %s", ua, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 54e87b5b6..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,336 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
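For orientation, the adal types removed above (ServicePrincipalToken, its constructors, and the refresh methods) were driven by a short client-credentials sequence. A minimal sketch, assuming the old `github.com/Azure/go-autorest/autorest/adal` import path and placeholder tenant, client, and resource values (none of which come from this patch):

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant; NewOAuthConfig derives the token endpoint from it.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "<tenant-id>")
	if err != nil {
		log.Fatal(err)
	}

	// Client-credentials flow: client ID plus secret, scoped to a resource.
	spt, err := adal.NewServicePrincipalToken(*cfg, "<client-id>", "<client-secret>",
		"https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}

	// EnsureFreshWithContext refreshes only when the token is inside its
	// RefreshWithin window; OAuthToken returns the current access token.
	if err := spt.EnsureFreshWithContext(context.Background()); err != nil {
		log.Fatal(err)
	}

	req, _ := http.NewRequest(http.MethodGet,
		"https://management.azure.com/subscriptions?api-version=2020-01-01", nil)
	req.Header.Set("Authorization", "Bearer "+spt.OAuthToken())
}
```

The BearerAuthorizer deleted below wraps exactly this refresh-then-attach pattern as a PrepareDecorator.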
- -import ( - "crypto/tls" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" - authorization = "Authorization" - basic = "Basic" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(headers, nil) -} - -// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. -func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(nil, queryParameters) -} - -// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { - return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. -func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) - } -} - -// CognitiveServicesAuthorizer implements authorization for Cognitive Services. -type CognitiveServicesAuthorizer struct { - subscriptionKey string -} - -// NewCognitiveServicesAuthorizer is -func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { - return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} -} - -// WithAuthorization is -func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[apiKeyAuthorizerHeader] = csa.subscriptionKey - headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BearerAuthorizer implements the bearer authorization -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface. 
-func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // the ordering is important here, prefer RefresherWithContext if available - if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { - err = refresher.EnsureFresh() - } - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, - "Failed to refresh the Token for request to %s", r.URL) - } - return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) - } - return r, err - }) - } -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. -type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if s == nil { - s = sender(tls.RenegotiateNever) - } - return &BearerAuthorizerCallback{sender: s, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(resp *http.Response) bool { - authHeader := resp.Header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. -func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header -// with the value "Basic " where is a base64-encoded username:password tuple. -type BasicAuthorizer struct { - userName string - password string -} - -// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. -func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { - return &BasicAuthorizer{ - userName: userName, - password: password, - } -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Basic " followed by the base64-encoded username:password tuple. 
-func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. -type MultiTenantServicePrincipalTokenAuthorizer interface { - WithAuthorization() PrepareDecorator -} - -// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider -func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { - return &multiTenantSPTAuthorizer{tp: tp} -} - -type multiTenantSPTAuthorizer struct { - tp adal.MultitenantOAuthTokenProvider -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the -// primary token along with the auxiliary authorization header using the auxiliary tokens. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, - "Failed to refresh one or more Tokens for request to %s", r.URL) - } - } - r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) - if err != nil { - return r, err - } - auxTokens := mt.tp.AuxiliaryOAuthTokens() - for i := range auxTokens { - auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) - } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index aafdf021f..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. 
For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
-func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 1cb41cbeb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,924 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - pt pollingTracker -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. 
-func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// DoneWithContext queries the service to see if the operation has completed. -func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. 
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - cancelCtx := ctx - // if the provided context already has a deadline don't override it - _, hasDeadline := ctx.Deadline() - if d := client.PollingDuration; !hasDeadline && d != 0 { - var cancel context.CancelFunc - cancelCtx, cancel = context.WithTimeout(ctx, d) - defer cancel() - } - - done, err := f.DoneWithContext(ctx, client) - for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (f *Future) UnmarshalJSON(data []byte) error { - // unmarshal into JSON object to determine the tracker type - obj := map[string]interface{}{} - err := json.Unmarshal(data, &obj) - if err != nil { - return err - } - if obj["method"] == nil { - return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") - } - method := obj["method"].(string) - switch strings.ToUpper(method) { - case http.MethodDelete: - f.pt = &pollingTrackerDelete{} - case http.MethodPatch: - f.pt = &pollingTrackerPatch{} - case http.MethodPost: - f.pt = &pollingTrackerPost{} - case http.MethodPut: - f.pt = &pollingTrackerPut{} - default: - return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) - } - // now unmarshal into the tracker - return json.Unmarshal(data, &f.pt) -} - -// PollingURL returns the URL used for retrieving the status of the long-running operation. -func (f Future) PollingURL() string { - if f.pt == nil { - return "" - } - return f.pt.pollingURL() -} - -// GetResult should be called once polling has completed successfully. -// It makes the final GET call to retrieve the resultant payload. -func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { - if f.pt.finalGetURL() == "" { - // we can end up in this situation if the async operation returns a 200 - // with no polling URLs. 
in that case return the response which should - // contain the JSON payload (only do this for successful terminal cases). - if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { - return lr, nil - } - return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") - } - req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) - if err != nil { - return nil, err - } - return sender.Do(req) -} - -type pollingTracker interface { - // these methods can differ per tracker - - // checks the response headers and status code to determine the polling mechanism - updatePollingMethod() error - - // checks the response for tracker-specific error conditions - checkForErrors() error - - // returns true if provisioning state should be checked - provisioningStateApplicable() bool - - // methods common to all trackers - - // initializes a tracker's polling URL and method, called for each iteration. - // these values can be overridden by each polling tracker as required. - initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(ctx context.Context, sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) - req, err = preparer.Prepare(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") - } - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. 
-func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. - if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - 
return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. -func (pt *pollingTrackerBase) initPollingMethod() error { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - return nil - } - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh != "" { - pt.URI = lh - pt.Pm = PollingLocation - return nil - } - // it's ok if we didn't find a polling header, this will be handled elsewhere - return nil -} - -// DELETE - -type pollingTrackerDelete struct { - pollingTrackerBase -} - -func (pt *pollingTrackerDelete) updatePollingMethod() error { - // for 201 the Location header is required - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - } - pt.Pm = PollingLocation - pt.FinalGetURI = pt.URI - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerDelete) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerDelete) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PATCH - -type pollingTrackerPatch struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPatch) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absence of the "final GET" mechanism for PATCH - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - if ao == "" { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs 
in 202 response") - } else { - pt.URI = lh - pt.Pm = PollingLocation - } - } - } - return nil -} - -func (pt pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
- if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPut) checkForErrors() error { - err := pt.baseCheckForErrors() - if err != nil { - return err - } - // if there are no LRO headers then the body cannot be empty - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } - lh, err := getURLFromLocationHeader(pt.resp) - if err != nil { - return err - } - if ao == "" && lh == "" && len(pt.rawBody) == 0 { - return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") - } - return nil -} - -func (pt pollingTrackerPut) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// creates a polling tracker based on the verb of the original request -func createPollingTracker(resp *http.Response) (pollingTracker, error) { - var pt pollingTracker - switch strings.ToUpper(resp.Request.Method) { - case http.MethodDelete: - pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPatch: - pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPost: - pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPut: - pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} - default: - return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) - } - if err := pt.initializeState(); err != nil { - return pt, err - } - // this initializes the polling header values, we do this during creation in case the - // initial response send us invalid values; this way the API call will return a non-nil - // error (not doing this means the error shows up in Future.Done) - return pt, pt.updatePollingMethod() -} - -// gets the polling URL from the Azure-AsyncOperation header. -// ensures the URL is well-formed and absolute. -func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// gets the polling URL from the Location header. -// ensures the URL is well-formed and absolute. -func getURLFromLocationHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// verify that the URL is valid and absolute -func isValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. 
- PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go deleted file mode 100644 index 5f02026b3..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ /dev/null @@ -1,737 +0,0 @@ -package auth - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - "unicode/utf16" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/cli" - "github.com/dimchansky/utfbom" - "golang.org/x/crypto/pkcs12" -) - -// The possible keys in the Values map. -const ( - SubscriptionID = "AZURE_SUBSCRIPTION_ID" - TenantID = "AZURE_TENANT_ID" - AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" - ClientID = "AZURE_CLIENT_ID" - ClientSecret = "AZURE_CLIENT_SECRET" - CertificatePath = "AZURE_CERTIFICATE_PATH" - CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" - Username = "AZURE_USERNAME" - Password = "AZURE_PASSWORD" - EnvironmentName = "AZURE_ENVIRONMENT" - Resource = "AZURE_AD_RESOURCE" - ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" - ResourceManagerEndpoint = "ResourceManagerEndpoint" - GraphResourceID = "GraphResourceID" - SQLManagementEndpoint = "SQLManagementEndpoint" - GalleryEndpoint = "GalleryEndpoint" - ManagementEndpoint = "ManagementEndpoint" -) - -// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. 
MSI -func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - return settings.GetAuthorizer() -} - -// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - settings.Values[Resource] = resource - return settings.GetAuthorizer() -} - -// EnvironmentSettings contains the available authentication settings. -type EnvironmentSettings struct { - Values map[string]string - Environment azure.Environment -} - -// GetSettingsFromEnvironment returns the available authentication settings from the environment. -func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { - s = EnvironmentSettings{ - Values: map[string]string{}, - } - s.setValue(SubscriptionID) - s.setValue(TenantID) - s.setValue(AuxiliaryTenantIDs) - s.setValue(ClientID) - s.setValue(ClientSecret) - s.setValue(CertificatePath) - s.setValue(CertificatePassword) - s.setValue(Username) - s.setValue(Password) - s.setValue(EnvironmentName) - s.setValue(Resource) - if v := s.Values[EnvironmentName]; v == "" { - s.Environment = azure.PublicCloud - } else { - s.Environment, err = azure.EnvironmentFromName(v) - } - if s.Values[Resource] == "" { - s.Values[Resource] = s.Environment.ResourceManagerEndpoint - } - return -} - -// GetSubscriptionID returns the available subscription ID or an empty string. -func (settings EnvironmentSettings) GetSubscriptionID() string { - return settings.Values[SubscriptionID] -} - -// adds the specified environment variable value to the Values map if it exists -func (settings EnvironmentSettings) setValue(key string) { - if v := os.Getenv(key); v != "" { - settings.Values[key] = v - } -} - -// helper to return client and tenant IDs -func (settings EnvironmentSettings) getClientAndTenant() (string, string) { - clientID := settings.Values[ClientID] - tenantID := settings.Values[TenantID] - return clientID, tenantID -} - -// GetClientCredentials creates a config object from the available client credentials. -// An error is returned if no client credentials are available. -func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { - secret := settings.Values[ClientSecret] - if secret == "" { - return ClientCredentialsConfig{}, errors.New("missing client secret") - } - clientID, tenantID := settings.getClientAndTenant() - config := NewClientCredentialsConfig(clientID, secret, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { - config.AuxTenants = strings.Split(auxTenants, ";") - for i := range config.AuxTenants { - config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) - } - } - return config, nil -} - -// GetClientCertificate creates a config object from the available certificate credentials. -// An error is returned if no certificate credentials are available. 
-func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { - certPath := settings.Values[CertificatePath] - if certPath == "" { - return ClientCertificateConfig{}, errors.New("missing certificate path") - } - certPwd := settings.Values[CertificatePassword] - clientID, tenantID := settings.getClientAndTenant() - config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - return config, nil -} - -// GetUsernamePassword creates a config object from the available username/password credentials. -// An error is returned if no username/password credentials are available. -func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { - username := settings.Values[Username] - password := settings.Values[Password] - if username == "" || password == "" { - return UsernamePasswordConfig{}, errors.New("missing username/password") - } - clientID, tenantID := settings.getClientAndTenant() - config := NewUsernamePasswordConfig(username, password, clientID, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - return config, nil -} - -// GetMSI creates a MSI config object from the available client ID. -func (settings EnvironmentSettings) GetMSI() MSIConfig { - config := NewMSIConfig() - config.Resource = settings.Values[Resource] - config.ClientID = settings.Values[ClientID] - return config -} - -// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs. -func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig { - clientID, tenantID := settings.getClientAndTenant() - config := NewDeviceFlowConfig(clientID, tenantID) - config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint - config.Resource = settings.Values[Resource] - return config -} - -// GetAuthorizer creates an Authorizer configured from environment variables in the order: -// 1. Client credentials -// 2. Client certificate -// 3. Username password -// 4. MSI -func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) { - //1.Client Credentials - if c, e := settings.GetClientCredentials(); e == nil { - return c.Authorizer() - } - - //2. Client Certificate - if c, e := settings.GetClientCertificate(); e == nil { - return c.Authorizer() - } - - //3. Username Password - if c, e := settings.GetUsernamePassword(); e == nil { - return c.Authorizer() - } - - // 4. MSI - return settings.GetMSI().Authorizer() -} - -// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order. -// 1. Client credentials -// 2. Client certificate -func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { - settings, err := GetSettingsFromFile() - if err != nil { - return nil, err - } - if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil { - return a, err - } - if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil { - return a, err - } - return nil, errors.New("auth file missing client and certificate credentials") -} - -// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order. -// 1. Client credentials -// 2. 
Client certificate -func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { - s, err := GetSettingsFromFile() - if err != nil { - return nil, err - } - if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { - return a, err - } - if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { - return a, err - } - return nil, errors.New("auth file missing client and certificate credentials") -} - -// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLI() (autorest.Authorizer, error) { - settings, err := GetSettingsFromEnvironment() - if err != nil { - return nil, err - } - - if settings.Values[Resource] == "" { - settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint - } - - return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) -} - -// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. -func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { - token, err := cli.GetTokenFromCLI(resource) - if err != nil { - return nil, err - } - - adalToken, err := token.ToADALToken() - if err != nil { - return nil, err - } - - return autorest.NewBearerAuthorizer(&adalToken), nil -} - -// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. -func GetSettingsFromFile() (FileSettings, error) { - s := FileSettings{} - fileLocation := os.Getenv("AZURE_AUTH_LOCATION") - if fileLocation == "" { - return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") - } - - contents, err := ioutil.ReadFile(fileLocation) - if err != nil { - return s, err - } - - // Auth file might be encoded - decoded, err := decode(contents) - if err != nil { - return s, err - } - - authFile := map[string]interface{}{} - err = json.Unmarshal(decoded, &authFile) - if err != nil { - return s, err - } - - s.Values = map[string]string{} - s.setKeyValue(ClientID, authFile["clientId"]) - s.setKeyValue(ClientSecret, authFile["clientSecret"]) - s.setKeyValue(CertificatePath, authFile["clientCertificate"]) - s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) - s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) - s.setKeyValue(TenantID, authFile["tenantId"]) - s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) - s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) - s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) - s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) - s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) - s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) - return s, nil -} - -// FileSettings contains the available authentication settings. -type FileSettings struct { - Values map[string]string -} - -// GetSubscriptionID returns the available subscription ID or an empty string. 
-func (settings FileSettings) GetSubscriptionID() string { - return settings.Values[SubscriptionID] -} - -// adds the specified value to the Values map if it isn't nil -func (settings FileSettings) setKeyValue(key string, val interface{}) { - if val != nil { - settings.Values[key] = val.(string) - } -} - -// returns the specified AAD endpoint or the public cloud endpoint if unspecified -func (settings FileSettings) getAADEndpoint() string { - if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { - return v - } - return azure.PublicCloud.ActiveDirectoryEndpoint -} - -// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) -} - -// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. -func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ClientCredentialsAuthorizerWithResource(resource) -} - -// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken -// from the available client credentials and the specified resource. -func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { - if _, ok := settings.Values[ClientSecret]; !ok { - return nil, errors.New("missing client secret") - } - config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) -} - -func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { - if _, ok := settings.Values[CertificatePath]; !ok { - return ClientCertificateConfig{}, errors.New("missing certificate path") - } - cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) - cfg.AADEndpoint = settings.getAADEndpoint() - cfg.Resource = resource - return cfg, nil -} - -// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. -func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { - spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) - if err != nil { - return nil, err - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) -} - -// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
-func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) { - resource, err := settings.getResourceForToken(baseURI) - if err != nil { - return nil, err - } - return settings.ClientCertificateAuthorizerWithResource(resource) -} - -// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials. -func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) { - cfg, err := settings.clientCertificateConfigWithResource(resource) - if err != nil { - return nil, err - } - return cfg.ServicePrincipalToken() -} - -// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource. -func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) { - cfg, err := settings.clientCertificateConfigWithResource(resource) - if err != nil { - return nil, err - } - return cfg.Authorizer() -} - -func decode(b []byte) ([]byte, error) { - reader, enc := utfbom.Skip(bytes.NewReader(b)) - - switch enc { - case utfbom.UTF16LittleEndian: - u16 := make([]uint16, (len(b)/2)-1) - err := binary.Read(reader, binary.LittleEndian, &u16) - if err != nil { - return nil, err - } - return []byte(string(utf16.Decode(u16))), nil - case utfbom.UTF16BigEndian: - u16 := make([]uint16, (len(b)/2)-1) - err := binary.Read(reader, binary.BigEndian, &u16) - if err != nil { - return nil, err - } - return []byte(string(utf16.Decode(u16))), nil - } - return ioutil.ReadAll(reader) -} - -func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { - // Compare dafault base URI from the SDK to the endpoints from the public cloud - // Base URI and token resource are the same string. This func finds the authentication - // file field that matches the SDK base URI. The SDK defines the public cloud - // endpoint as its default base URI - if !strings.HasSuffix(baseURI, "/") { - baseURI += "/" - } - switch baseURI { - case azure.PublicCloud.ServiceManagementEndpoint: - return settings.Values[ManagementEndpoint], nil - case azure.PublicCloud.ResourceManagerEndpoint: - return settings.Values[ResourceManagerEndpoint], nil - case azure.PublicCloud.ActiveDirectoryEndpoint: - return settings.Values[ActiveDirectoryEndpoint], nil - case azure.PublicCloud.GalleryEndpoint: - return settings.Values[GalleryEndpoint], nil - case azure.PublicCloud.GraphEndpoint: - return settings.Values[GraphResourceID], nil - } - return "", fmt.Errorf("auth: base URI not found in endpoints") -} - -// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. -// Defaults to Public Cloud and Resource Manager Endpoint. -func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { - return ClientCredentialsConfig{ - ClientID: clientID, - ClientSecret: clientSecret, - TenantID: tenantID, - Resource: azure.PublicCloud.ResourceManagerEndpoint, - AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, - } -} - -// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. -// Defaults to Public Cloud and Resource Manager Endpoint. 
-func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { - return ClientCertificateConfig{ - CertificatePath: certificatePath, - CertificatePassword: certificatePassword, - ClientID: clientID, - TenantID: tenantID, - Resource: azure.PublicCloud.ResourceManagerEndpoint, - AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, - } -} - -// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. -// Defaults to Public Cloud and Resource Manager Endpoint. -func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { - return UsernamePasswordConfig{ - Username: username, - Password: password, - ClientID: clientID, - TenantID: tenantID, - Resource: azure.PublicCloud.ResourceManagerEndpoint, - AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, - } -} - -// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. -func NewMSIConfig() MSIConfig { - return MSIConfig{ - Resource: azure.PublicCloud.ResourceManagerEndpoint, - } -} - -// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. -// Defaults to Public Cloud and Resource Manager Endpoint. -func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { - return DeviceFlowConfig{ - ClientID: clientID, - TenantID: tenantID, - Resource: azure.PublicCloud.ResourceManagerEndpoint, - AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, - } -} - -//AuthorizerConfig provides an authorizer from the configuration provided. -type AuthorizerConfig interface { - Authorizer() (autorest.Authorizer, error) -} - -// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. -type ClientCredentialsConfig struct { - ClientID string - ClientSecret string - TenantID string - AuxTenants []string - AADEndpoint string - Resource string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from client credentials. -func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) -} - -// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials. -func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { - oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) - if err != nil { - return nil, err - } - return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) -} - -// Authorizer gets the authorizer from client credentials. 
-func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { - if len(ccc.AuxTenants) == 0 { - spToken, err := ccc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil - } - mtSPT, err := ccc.MultiTenantServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) - } - return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil -} - -// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. -type ClientCertificateConfig struct { - ClientID string - CertificatePath string - CertificatePassword string - TenantID string - AADEndpoint string - Resource string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. -func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) - if err != nil { - return nil, err - } - certData, err := ioutil.ReadFile(ccc.CertificatePath) - if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) - } - certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) - if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) - } - return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) -} - -// Authorizer gets an authorizer object from client certificate. -func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := ccc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. -type DeviceFlowConfig struct { - ClientID string - TenantID string - AADEndpoint string - Resource string -} - -// Authorizer gets the authorizer from device flow. -func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := dfc.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// ServicePrincipalToken gets the service principal token from device flow. 
-func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) - if err != nil { - return nil, err - } - oauthClient := &autorest.Client{} - deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) - if err != nil { - return nil, fmt.Errorf("failed to start device auth flow: %s", err) - } - log.Println(*deviceCode.Message) - token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) - if err != nil { - return nil, fmt.Errorf("failed to finish device auth flow: %s", err) - } - return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) -} - -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, err - } - - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") - } - - return certificate, rsaPrivateKey, nil -} - -// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. -type UsernamePasswordConfig struct { - ClientID string - Username string - Password string - TenantID string - AADEndpoint string - Resource string -} - -// ServicePrincipalToken creates a ServicePrincipalToken from username and password. -func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) -} - -// Authorizer gets the authorizer from a username and a password. -func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { - spToken, err := ups.ServicePrincipalToken() - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) - } - return autorest.NewBearerAuthorizer(spToken), nil -} - -// MSIConfig provides the options to get a bearer authorizer through MSI. -type MSIConfig struct { - Resource string - ClientID string -} - -// Authorizer gets the authorizer from MSI. 
-func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { - msiEndpoint, err := adal.GetMSIEndpoint() - if err != nil { - return nil, err - } - - var spToken *adal.ServicePrincipalToken - if mc.ClientID == "" { - spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) - } - } else { - spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err) - } - } - - return autorest.NewBearerAuthorizer(spToken), nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go deleted file mode 100644 index 2f09cd177..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package auth - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3a0a439ff..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package azure provides Azure-specific implementations used with AutoRest. -// See the included examples for more detail. -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. 
- HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. -// It adhears to the OData v4 specification for error responses. -type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` -} - -func (se ServiceError) Error() string { - result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) - - if se.Target != nil { - result += fmt.Sprintf(" Target=%q", *se.Target) - } - - if se.Details != nil { - d, err := json.Marshal(se.Details) - if err != nil { - result += fmt.Sprintf(" Details=%v", se.Details) - } - result += fmt.Sprintf(" Details=%v", string(d)) - } - - if se.InnerError != nil { - d, err := json.Marshal(se.InnerError) - if err != nil { - result += fmt.Sprintf(" InnerError=%v", se.InnerError) - } - result += fmt.Sprintf(" InnerError=%v", string(d)) - } - - if se.AdditionalInfo != nil { - d, err := json.Marshal(se.AdditionalInfo) - if err != nil { - result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) - } - result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) - } - - return result -} - -// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. -func (se *ServiceError) UnmarshalJSON(b []byte) error { - // per the OData v4 spec the details field must be an array of JSON objects. - // unfortunately not all services adhear to the spec and just return a single - // object instead of an array with one object. so we have to perform some - // shenanigans to accommodate both cases. - // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 - - type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - se1 := serviceError1{} - err := json.Unmarshal(b, &se1) - if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) - return nil - } - - se2 := serviceError2{} - err = json.Unmarshal(b, &se2) - if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) - se.Details = append(se.Details, se2.Details) - return nil - } - return err -} - -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { - se.Code = code - se.Message = message - se.Target = target - se.Details = details - se.InnerError = inner - se.AdditionalInfo = additional -} - -// RequestError describes an error response returned by Azure service. 
-type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// Resource contains details about an Azure resource. -type Resource struct { - SubscriptionID string - ResourceGroup string - Provider string - ResourceType string - ResourceName string -} - -// ParseResourceID parses a resource ID into a ResourceDetails struct. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. -func ParseResourceID(resourceID string) (Resource, error) { - - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` - resourceIDPattern := regexp.MustCompile(resourceIDPatternText) - match := resourceIDPattern.FindStringSubmatch(resourceID) - - if len(match) == 0 { - return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) - } - - v := strings.Split(match[5], "/") - resourceName := v[len(v)-1] - - result := Resource{ - SubscriptionID: match[1], - ResourceGroup: match[2], - Provider: match[3], - ResourceType: match[4], - ResourceName: resourceName, - } - - return result, nil -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. -func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). 
-func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. - // This will leave the Body available to the caller. - b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { - return err - } - } - if e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body as raw JSON in hopes of getting something. 
- rawBody := map[string]interface{}{} - if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { - return err - } - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go deleted file mode 100644 index 618bed392..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go deleted file mode 100644 index a336b958d..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go +++ /dev/null @@ -1,79 +0,0 @@ -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/dimchansky/utfbom" - "github.com/mitchellh/go-homedir" -) - -// Profile represents a Profile from the Azure CLI -type Profile struct { - InstallationID string `json:"installationId"` - Subscriptions []Subscription `json:"subscriptions"` -} - -// Subscription represents a Subscription from the Azure CLI -type Subscription struct { - EnvironmentName string `json:"environmentName"` - ID string `json:"id"` - IsDefault bool `json:"isDefault"` - Name string `json:"name"` - State string `json:"state"` - TenantID string `json:"tenantId"` - User *User `json:"user"` -} - -// User represents a User from the Azure CLI -type User struct { - Name string `json:"name"` - Type string `json:"type"` -} - -const azureProfileJSON = "azureProfile.json" - -// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI -func ProfilePath() (string, error) { - if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" { - return filepath.Join(cfgDir, azureProfileJSON), nil - } - return homedir.Expand("~/.azure/" + azureProfileJSON) -} - -// LoadProfile restores a Profile object from a file located at 'path'. -func LoadProfile(path string) (result Profile, err error) { - var contents []byte - contents, err = ioutil.ReadFile(path) - if err != nil { - err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - return - } - reader := utfbom.SkipOnly(bytes.NewReader(contents)) - - dec := json.NewDecoder(reader) - if err = dec.Decode(&result); err != nil { - err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) - return - } - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go deleted file mode 100644 index 810075ba6..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go +++ /dev/null @@ -1,170 +0,0 @@ -package cli - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "os/exec" - "regexp" - "runtime" - "strconv" - "time" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/date" - "github.com/mitchellh/go-homedir" -) - -// Token represents an AccessToken from the Azure CLI -type Token struct { - AccessToken string `json:"accessToken"` - Authority string `json:"_authority"` - ClientID string `json:"_clientId"` - ExpiresOn string `json:"expiresOn"` - IdentityProvider string `json:"identityProvider"` - IsMRRT bool `json:"isMRRT"` - RefreshToken string `json:"refreshToken"` - Resource string `json:"resource"` - TokenType string `json:"tokenType"` - UserID string `json:"userId"` -} - -// ToADALToken converts an Azure CLI `Token`` to an `adal.Token`` -func (t Token) ToADALToken() (converted adal.Token, err error) { - tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) - if err != nil { - err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) - return - } - - difference := tokenExpirationDate.Sub(date.UnixEpoch()) - - converted = adal.Token{ - AccessToken: t.AccessToken, - Type: t.TokenType, - ExpiresIn: "3600", - ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))), - RefreshToken: t.RefreshToken, - Resource: t.Resource, - } - return -} - -// AccessTokensPath returns the path where access tokens are stored from the Azure CLI -// TODO(#199): add unit test. -func AccessTokensPath() (string, error) { - // Azure-CLI allows user to customize the path of access tokens thorugh environment variable. - var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE") - var err error - - // Fallback logic to default path on non-cloud-shell environment. - // TODO(#200): remove the dependency on hard-coding path. - if accessTokenPath == "" { - accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json") - } - - return accessTokenPath, err -} - -// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object -func ParseExpirationDate(input string) (*time.Time, error) { - // CloudShell (and potentially the Azure CLI in future) - expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) - if cloudShellErr != nil { - // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) - const cliFormat = "2006-01-02 15:04:05.999999" - expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) - if cliErr == nil { - return &expirationDate, nil - } - - return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) - } - - return &expirationDate, nil -} - -// LoadTokens restores a set of Token objects from a file located at 'path'. -func LoadTokens(path string) ([]Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var tokens []Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&tokens); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) - } - - return tokens, nil -} - -// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. -func GetTokenFromCLI(resource string) (*Token, error) { - // This is the path that a developer can set to tell this class what the install path for Azure CLI is. 
- const azureCLIPath = "AzureCLIPath" - - // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. - azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) - - // Default path for non-Windows. - const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" - - // Validate resource, since it gets sent as a command line argument to Azure CLI - const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) - } - - // Execute Azure CLI to get token - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) - cliCmd.Env = os.Environ() - cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) - cliCmd.Args = append(cliCmd.Args, "/c", "az") - } else { - cliCmd = exec.Command("az") - cliCmd.Env = os.Environ() - cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) - } - cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) - - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - - output, err := cliCmd.Output() - if err != nil { - return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) - } - - tokenResponse := Token{} - err = json.Unmarshal(output, &tokenResponse) - if err != nil { - return nil, err - } - - return &tokenResponse, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 6c20b8179..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,244 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -const ( - // EnvironmentFilepathName captures the name of the environment variable containing the path to the file - // to be used while populating the Azure Environment. - EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - - // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. 
- NotAvailable = "N/A" -) - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// ResourceIdentifier contains a set of Azure resource IDs. -type ResourceIdentifier struct { - Graph string `json:"graph"` - KeyVault string `json:"keyVault"` - Datalake string `json:"datalake"` - Batch string `json:"batch"` - OperationalInsights string `json:"operationalInsights"` - Storage string `json:"storage"` -} - -// Environment represents a set of endpoints for each of Azure's Clouds. -type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - TokenAudience string `json:"tokenAudience"` - ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - CosmosDBDNSSuffix: "documents.azure.com", - TokenAudience: "https://management.azure.com/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.azure.net", - Datalake: "https://datalake.azure.net/", - Batch: "https://batch.core.windows.net/", - OperationalInsights: "https://api.loganalytics.io", - Storage: "https://storage.azure.com/", - }, - } - - // USGovernmentCloud is the cloud environment for the US Government - 
USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.us", - CosmosDBDNSSuffix: "documents.azure.us", - TokenAudience: "https://management.usgovcloudapi.net/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.windows.net/", - KeyVault: "https://vault.usgovcloudapi.net", - Datalake: NotAvailable, - Batch: "https://batch.core.usgovcloudapi.net/", - OperationalInsights: "https://api.loganalytics.us", - Storage: "https://storage.azure.com/", - }, - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - CosmosDBDNSSuffix: "documents.azure.cn", - TokenAudience: "https://management.chinacloudapi.cn/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.chinacloudapi.cn/", - KeyVault: "https://vault.azure.cn", - Datalake: NotAvailable, - Batch: "https://batch.chinacloudapi.cn/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: 
"https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: NotAvailable, - CosmosDBDNSSuffix: "documents.microsoftazure.de", - TokenAudience: "https://management.microsoftazure.de/", - ResourceIdentifiers: ResourceIdentifier{ - Graph: "https://graph.cloudapi.de/", - KeyVault: "https://vault.microsoftazure.de", - Datalake: NotAvailable, - Batch: "https://batch.cloudapi.de/", - OperationalInsights: NotAvailable, - Storage: "https://storage.azure.com/", - }, - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. -// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e95c..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... - EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. 
-func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case 
EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 86ce9f2b5..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. -// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. 
Original error: %s", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, this sections are easier to mantain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - registrationStartTime := time.Now() - for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != 
nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index 1c6a0617a..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,300 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. - DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. -// If there was no response (i.e. the underlying http.Response is nil) the return value is false. 
-func (r Response) IsHTTPStatus(statusCode int) bool { - if r.Response == nil { - return false - } - return r.Response.StatusCode == statusCode -} - -// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. -// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided -// the return value is false. -func (r Response) HasHTTPStatus(statusCodes ...int) bool { - return ResponseHasStatusCode(r.Response, statusCodes...) -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. 
- PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - return newClient(ua, tls.RenegotiateNever) -} - -// ClientOptions contains various Client configuration options. -type ClientOptions struct { - // UserAgent is an optional user-agent string to append to the default user agent. - UserAgent string - - // Renegotiation is an optional setting to control client-side TLS renegotiation. - Renegotiation tls.RenegotiationSupport -} - -// NewClientWithOptions returns an instance of a Client with the specified values. -func NewClientWithOptions(options ClientOptions) Client { - return newClient(options.UserAgent, options.Renegotiation) -} - -func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender(renegotiation) - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. 
-func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { - if c.Sender == nil { - return sender(renengotiation) - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c45710656..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. 
-func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go deleted file mode 100644 index 55adf930f..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. 
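For reference, the date.Date type deleted here is a thin wrapper around time.Time that marshals with the RFC3339 full-date layout "2006-01-02". A minimal sketch of round-tripping that layout with the standard library alone (illustrative, not part of this patch):

```
package main

import (
	"fmt"
	"time"
)

// fullDate is the same layout string the removed package used.
const fullDate = "2006-01-02"

func main() {
	d, err := time.Parse(fullDate, "2024-09-20")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Format(fullDate)) // prints 2024-09-20
}
```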
-// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad04..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
- b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
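The deleted date.Time type exists because Azure sometimes reports UTC timestamps without the trailing 'Z'; its UnmarshalJSON and UnmarshalText choose a layout via a regex. A simpler try-then-fall-back sketch of the same idea, reusing the zone-less layout constant from the removed file (parseAzureTime is an illustrative name, not part of this patch):

```
package main

import (
	"fmt"
	"time"
)

// azureUTCFormat matches the azureUtcFormat constant in the deleted time.go:
// an RFC3339-style timestamp with no time-zone designator.
const azureUTCFormat = "2006-01-02T15:04:05.999999999"

// parseAzureTime tries a full RFC3339 parse first, then falls back to the
// zone-less Azure layout.
func parseAzureTime(s string) (time.Time, error) {
	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
		return t, nil
	}
	return time.Parse(azureUTCFormat, s)
}

func main() {
	for _, s := range []string{"2024-09-20T12:17:42Z", "2024-09-20T12:17:42.123"} {
		t, err := parseAzureTime(s)
		fmt.Println(t, err)
	}
}
```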
- b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) -func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. 
-func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0eb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f3332..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. 
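The deleted date.UnixTime type serializes a time.Time as a JSON number of seconds since the Unix epoch. A stand-alone sketch of that encoding with encoding/json (the lower-case unixTime type is illustrative, not the removed implementation):

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// unixTime marshals as fractional seconds since the Unix epoch, mirroring
// the behavior of the removed date.UnixTime type.
type unixTime time.Time

func (t unixTime) MarshalJSON() ([]byte, error) {
	secs := float64(time.Time(t).UnixNano()) / 1e9
	return json.Marshal(secs)
}

func (t *unixTime) UnmarshalJSON(data []byte) error {
	var secs float64
	if err := json.Unmarshal(data, &secs); err != nil {
		return err
	}
	*t = unixTime(time.Unix(0, int64(secs*float64(time.Second))))
	return nil
}

func main() {
	b, _ := json.Marshal(unixTime(time.Date(2024, 9, 20, 12, 17, 42, 0, time.UTC)))
	fmt.Println(string(b)) // 1726834662

	var t unixTime
	_ = json.Unmarshal(b, &t)
	fmt.Println(time.Time(t).UTC())
}
```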
For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). -func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 6e8ed64eb..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,550 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerAuxAuthorization = "x-ms-authorization-auxiliary" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// used as a key type in context.WithValue() -type ctxPrepareDecorators struct{} - -// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. -// If no PrepareDecorators are provided the context is unchanged. -func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { - if len(prepareDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) -} - -// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. -func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { - inCtx := ctx.Value(ctxPrepareDecorators{}) - if pd, ok := inCtx.([]PrepareDecorator); ok { - return pd - } - return defaultPrepareDecorators -} - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. 
-func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. 
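The preparer.go file removed here is built around one idea: a Preparer transforms an *http.Request, and PrepareDecorators wrap Preparers so that request construction composes. A stripped-down sketch of that pattern using only the standard library (the lower-case names are illustrative, not the removed API):

```
package main

import (
	"fmt"
	"net/http"
)

type preparer func(*http.Request) (*http.Request, error)

type prepareDecorator func(preparer) preparer

// createPreparer mirrors the deleted CreatePreparer/DecoratePreparer pair:
// start from an identity preparer and wrap it with each decorator in turn.
func createPreparer(decorators ...prepareDecorator) preparer {
	p := preparer(func(r *http.Request) (*http.Request, error) { return r, nil })
	for _, decorate := range decorators {
		p = decorate(p)
	}
	return p
}

// withHeader is a simplified WithHeader: run the wrapped preparer first,
// then set the header if nothing failed.
func withHeader(key, value string) prepareDecorator {
	return func(p preparer) preparer {
		return func(r *http.Request) (*http.Request, error) {
			r, err := p(r)
			if err == nil {
				r.Header.Set(key, value)
			}
			return r, err
		}
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/", nil)
	req, err := createPreparer(
		withHeader("User-Agent", "example-client/1.0"),
		withHeader("Accept", "application/json"),
	)(req)
	fmt.Println(req.Header, err)
}
```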
-func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. -func AsMerge() PrepareDecorator { return WithMethod("MERGE") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithBytes returns a PrepareDecorator that takes a list of bytes -// which passes the bytes directly to the body -func WithBytes(input *[]byte) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if input == nil { - return r, fmt.Errorf("Input Bytes was nil") - } - - r.ContentLength = int64(len(*input)) - r.Body = ioutil.NopCloser(bytes.NewReader(*input)) - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. 
-func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. -func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. 
-func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the -// request and sets the Content-Length header. -func WithXML(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := xml.Marshal(v) - if err == nil { - // we have to tack on an XML header - withHeader := xml.Header + string(b) - bytesWithHeader := []byte(withHeader) - - r.ContentLength = int64(len(bytesWithHeader)) - r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
-func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := MapToValues(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - for i := range value { - d, err := url.QueryUnescape(value[i]) - if err != nil { - return r, err - } - value[i] = d - } - v[key] = value - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index 349e1963a..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,269 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. 
Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. 
-func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingBytes(v *[]byte) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - bytes, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - *v = bytes - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. -func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbed7..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. 
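The WithErrorUnlessStatusCode decorator deleted above drains the response body into the returned error and then swaps in a NopCloser so later readers still see the bytes. The same read-once-and-restore idiom in plain Go (readAndRestore and the test handler are illustrative, not part of the removed code):

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// readAndRestore reads the full response body, then replaces resp.Body so
// the body can be read again by subsequent consumers.
func readAndRestore(resp *http.Response) ([]byte, error) {
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	resp.Body.Close()
	resp.Body = io.NopCloser(bytes.NewReader(b))
	return b, nil
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "quota exceeded", http.StatusTooManyRequests)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := readAndRestore(resp)
	if resp.StatusCode != http.StatusOK {
		fmt.Printf("request failed: %s: %s", resp.Status, body)
	}
}
```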
-func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 7143cc61b..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index ae15c6bf9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. 
-type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 5e595d7b1..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,407 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/tls" - "fmt" - "log" - "math" - "net/http" - "net/http/cookiejar" - "strconv" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -// used as a key type in context.WithValue() -type ctxSendDecorators struct{} - -// WithSendDecorators adds the specified SendDecorators to the provided context. -// If no SendDecorators are provided the context is unchanged. -func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { - if len(sendDecorator) == 0 { - return ctx - } - return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) -} - -// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. -func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { - inCtx := ctx.Value(ctxSendDecorators{}) - if sd, ok := inCtx.([]SendDecorator); ok { - return sd - } - return defaultSendDecorators -} - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. 
Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(sender(tls.RenegotiateNever), decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -func sender(renengotiation tls.RenegotiationSupport) Sender { - // Use behaviour compatible with DefaultTransport, but require TLS minimum version. - defaultTransport := http.DefaultTransport.(*http.Transport) - transport := &http.Transport{ - Proxy: defaultTransport.Proxy, - DialContext: defaultTransport.DialContext, - MaxIdleConns: defaultTransport.MaxIdleConns, - IdleConnTimeout: defaultTransport.IdleConnTimeout, - TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, - ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: renengotiation, - }, - } - var roundTripper http.RoundTripper = transport - if tracing.IsEnabled() { - roundTripper = tracing.NewTransport(transport) - } - j, _ := cookiejar.New(nil) - return &http.Client{Jar: j, Transport: roundTripper} -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. 
-func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) 
{ - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. -// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) - }) - } -} - -// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the -// specified number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater -// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. -func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) - }) - } -} - -func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return - } - resp, err = s.Do(rr.Request()) - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // when count429 == false don't count a 429 against the number - // of attempts so that we continue to retry until it succeeds - if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { - attempt++ - } - } - return resp, err -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. -// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. -// The function returns true after successfully waiting for the specified duration. If there is -// no Retry-After header or the wait is cancelled the return value is false. -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - var dur time.Duration - ra := resp.Header.Get("Retry-After") - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - dur = time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - dur = t.Sub(time.Now()) - } - if dur > 0 { - select { - case <-time.After(dur): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. -func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. 
-func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - return DelayForBackoffWithCap(backoff, 0, attempt, cancel) -} - -// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap. -// The delay may be canceled by closing the passed channel. If terminated early, returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { - d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second - if cap > 0 && d > cap { - d = cap - } - select { - case <-time.After(d): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go deleted file mode 100644 index 86694bd25..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Package to provides helpers to ease working with pointer values of marshalled structures. -*/ -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// String returns a string value for the passed string pointer. It returns the empty string if the -// pointer is nil. -func String(s *string) string { - if s != nil { - return *s - } - return "" -} - -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { - return &s -} - -// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil -// slice if the pointer is nil. 
-func StringSlice(s *[]string) []string { - if s != nil { - return *s - } - return nil -} - -// StringSlicePtr returns a pointer to the passed string slice. -func StringSlicePtr(s []string) *[]string { - return &s -} - -// StringMap returns a map of strings built from the map of string pointers. The empty string is -// used for nil pointers. -func StringMap(msp map[string]*string) map[string]string { - ms := make(map[string]string, len(msp)) - for k, sp := range msp { - if sp != nil { - ms[k] = *sp - } else { - ms[k] = "" - } - } - return ms -} - -// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. -func StringMapPtr(ms map[string]string) *map[string]*string { - msp := make(map[string]*string, len(ms)) - for k, s := range ms { - msp[k] = StringPtr(s) - } - return &msp -} - -// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. -func Bool(b *bool) bool { - if b != nil { - return *b - } - return false -} - -// BoolPtr returns a pointer to the passed bool. -func BoolPtr(b bool) *bool { - return &b -} - -// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int(i *int) int { - if i != nil { - return *i - } - return 0 -} - -// IntPtr returns a pointer to the passed int. -func IntPtr(i int) *int { - return &i -} - -// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int32(i *int32) int32 { - if i != nil { - return *i - } - return 0 -} - -// Int32Ptr returns a pointer to the passed int32. -func Int32Ptr(i int32) *int32 { - return &i -} - -// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int64(i *int64) int64 { - if i != nil { - return *i - } - return 0 -} - -// Int64Ptr returns a pointer to the passed int64. -func Int64Ptr(i int64) *int64 { - return &i -} - -// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float32(i *float32) float32 { - if i != nil { - return *i - } - return 0.0 -} - -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { - return &i -} - -// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float64(i *float64) float64 { - if i != nil { - return *i - } - return 0.0 -} - -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { - return &i -} - -// ByteSlicePtr returns a pointer to the passed byte slice. -func ByteSlicePtr(b []byte) *[]byte { - return &b -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go deleted file mode 100644 index 8e8292107..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 08cf11c11..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,228 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. -func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. 
-func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. This expects a -//that the parameter passed to be a slice or array of a type that has the underlying -//type a string. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, v.Index(i).String()) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. -func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. 
-func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go deleted file mode 100644 index fed156dbf..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go +++ /dev/null @@ -1,48 +0,0 @@ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" -) - -// Error is the type that's returned when the validation of an APIs arguments constraints fails. -type Error struct { - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // Message is the error message. - Message string -} - -// Error returns a string containing the details of the validation failure. 
-func (e Error) Error() string { - return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message) -} - -// NewError creates a new Error object with the specified parameters. -// message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) Error { - return Error{ - PackageType: packageType, - Method: method, - Message: fmt.Sprintf(message, args...), - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go deleted file mode 100644 index 2b2668581..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go deleted file mode 100644 index 65899b69b..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ /dev/null @@ -1,400 +0,0 @@ -/* -Package validation provides methods for validating parameter value using reflection. -*/ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type Constraint struct { - - // Target field name for validation. - Target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - Name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - Rule interface{} - - // Chain Validations for struct type - Chain []Constraint -} - -// Validation stores parameter-wise validation. 
-type Validation struct { - TargetValue interface{} - Constraints []Constraint -} - -// Constraint list -const ( - Empty = "Empty" - Null = "Null" - ReadOnly = "ReadOnly" - Pattern = "Pattern" - MaxLength = "MaxLength" - MinLength = "MinLength" - MaxItems = "MaxItems" - MinItems = "MinItems" - MultipleOf = "MultipleOf" - UniqueItems = "UniqueItems" - InclusiveMaximum = "InclusiveMaximum" - ExclusiveMaximum = "ExclusiveMaximum" - ExclusiveMinimum = "ExclusiveMinimum" - InclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. -func Validate(m []Validation) error { - for _, item := range m { - v := reflect.ValueOf(item.TargetValue) - for _, constraint := range item.Constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v Constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.Target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) - } - - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(f), - Constraints: []Constraint{v}, - }, - }) -} - -func validatePtr(x reflect.Value, v Constraint) error { - if v.Name == ReadOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x.Elem()), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v Constraint) error { - i := x.Int() - r, ok := toInt64(v.Rule) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case MultipleOf: - if i%r != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) - } - case ExclusiveMinimum: - if i <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if i >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if i < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if i > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) - } - return nil -} - -func validateFloat(x reflect.Value, v Constraint) error { - f := x.Float() - r, ok := v.Rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case ExclusiveMinimum: - if f <= r { - return createError(x, v, 
fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) - } - return nil -} - -func validateString(x reflect.Value, v Constraint) error { - s := x.String() - switch v.Name { - case Empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) - } - case MaxLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) - } - case MinLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) - } - case ReadOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v Constraint) error { - switch v.Name { - case Null: - if x.IsNil() { - return checkNil(x, v) - } - case Empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case MaxItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) - } - case MinItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) - } - case UniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) - } - case ReadOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - keys 
:= x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - if v.Rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - - if v.Rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v Constraint, err string) error { - return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.Target, v.Name, getInterfaceValue(x), err) -} - -func toInt64(v interface{}) (int64, bool) { - if i64, ok := v.(int64); ok { - return i64, true - } - // older generators emit max constants as int, so if int64 fails fall back to int - if i32, ok := v.(int); ok { - return int64(i32), true - } - return 0, false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 7a71089c9..000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v13.0.2" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). -func Version() string { - return number -} diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index da09f394c..000000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,328 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. 
-func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. - URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. - Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. 
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE deleted file mode 100644 index b9d6a27ea..000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index 0e7a6e962..000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,67 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" -) - -// Tracer represents an HTTP tracing facility. -type Tracer interface { - NewTransport(base *http.Transport) http.RoundTripper - StartSpan(ctx context.Context, name string) context.Context - EndSpan(ctx context.Context, httpStatusCode int, err error) -} - -var ( - tracer Tracer -) - -// Register will register the provided Tracer. Pass nil to unregister a Tracer. -func Register(t Tracer) { - tracer = t -} - -// IsEnabled returns true if a Tracer has been registered. -func IsEnabled() bool { - return tracer != nil -} - -// NewTransport creates a new instrumenting http.RoundTripper for the -// registered Tracer. If no Tracer has been registered it returns nil. -func NewTransport(base *http.Transport) http.RoundTripper { - if tracer != nil { - return tracer.NewTransport(base) - } - return nil -} - -// StartSpan starts a trace span with the specified name, associating it with the -// provided context. Has no effect if a Tracer has not been registered. -func StartSpan(ctx context.Context, name string) context.Context { - if tracer != nil { - return tracer.StartSpan(ctx, name) - } - return ctx -} - -// EndSpan ends a previously started span stored in the context. -// Has no effect if a Tracer has not been registered. -func EndSpan(ctx context.Context, httpStatusCode int, err error) { - if tracer != nil { - tracer.EndSpan(ctx, httpStatusCode, err) - } -} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 000000000..3d8b93bc7 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 000000000..19210883b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +import "context" + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportHints are suggestions for storing data. +type ExportHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ReplaceHints are suggestions for loading data. +type ReplaceHints struct { + // PartitionKey is a suggested key for partitioning the cache + PartitionKey string +} + +// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or +// define the outcome of passing one. A Context without a timeout must receive a default timeout +// specified by the implementor. Retries must be implemented inside the implementation. +type ExportReplace interface { + // Replace replaces the cache with what is in external storage. Implementors should honor + // Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases. + Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error + // Export writes the binary representation of the cache (cache.Marshal()) to external storage. + // This is considered opaque. Context cancellations should be honored as in Replace. 
+ Export(ctx context.Context, cache Marshaler, hints ExportHints) error +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go new file mode 100644 index 000000000..f86286051 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -0,0 +1,719 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package confidential provides a client for authentication of "confidential" applications. +A "confidential" application is defined as an app that run on servers. They are considered +difficult to access and for that reason capable of keeping an application secret. +Confidential clients can hold configuration-time secrets. +*/ +package confidential + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +/* +Design note: + +confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. + +Duplicate Calls shared between public.Client and this package: +There is some duplicate call options provided here that are the same as in public.Client . This +is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s): +"a little copying is better than a little dependency". Yes, we could have another package with +shared options (fail). That divides like 2 options from all others which makes the user look +through more docs. We can have all clients in one package, but I think separate packages +here makes for better naming (public.Client vs client.PublicClient). So I chose a little +duplication. + +.Net People, Take note on X509: +This uses x509.Certificates and private keys. x509 does not store private keys. .Net +has a x509.Certificate2 abstraction that has private keys, but that just a strange invention. +As such I've put a PEM decoder into here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be include in the package documentation. + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type AuthenticationScheme = authority.AuthenticationScheme + +type Account = shared.Account + +// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. 
The file +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. +func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { + var certs []*x509.Certificate + var priv crypto.PrivateKey + for { + block, rest := pem.Decode(pemData) + if block == nil { + break + } + + //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase. + if x509.IsEncryptedPEMBlock(block) { + b, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) + } + block, _ = pem.Decode(b) + if block == nil { + return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode") + } + } + + switch block.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) + } + certs = append(certs, cert) + case "PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + + var err error + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + } + pemData = rest + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no certificates found") + } + + if priv == nil { + return nil, nil, fmt.Errorf("no private key found") + } + + return certs, priv, nil +} + +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions + +// Credential represents the credential used in confidential client flows. +type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. 
+func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key +// as returned by [CertFromPEM]. +func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} +} + +// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. +func AutoDetectRegion() string { + return "TryAutoDetect" +} + +// Client is a representation of authentication client for confidential applications as defined in the +// package doc. A new Client should be created PER SERVICE USER. 
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications +type Client struct { + base base.Client + cred *accesstokens.Credential +} + +// clientOptions are optional settings for New(). These options are set using various functions +// returning Option calls. +type clientOptions struct { + accessor cache.ExportReplace + authority, azureRegion string + capabilities []string + disableInstanceDiscovery, sendX5C bool + httpClient ops.HTTPClient +} + +// Option is an optional argument to New(). +type Option func(o *clientOptions) + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. +func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C() Option { + return func(o *clientOptions) { + o.sendX5C = true + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. +// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. +// See https://aka.ms/region-map for more details on region names. +// The region value should be short region name for the region where the service is deployed. +// For example "centralus" is short name for region Central US. +// Not all auth flows can use the regional token service. +// Service To Service (client credential flow) tokens can be obtained from the regional service. +// Requires configuration at the tenant level. +// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions). +// If auto-detection fails, the non-regional endpoint will be used. +// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. +func WithAzureRegion(val string) Option { + return func(o *clientOptions) { + o.azureRegion = val + } +} + +// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/". +// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its +// "application ID"). 
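To show how the options above compose with New (immediately below), here is a hedged construction sketch. The environment variable names, authority URL, and the regional/HTTP-client choices are placeholders rather than recommendations.

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	// Placeholder configuration read from hypothetical environment variables.
	authority := "https://login.microsoftonline.com/" + os.Getenv("AZURE_TENANT_ID")
	clientID := os.Getenv("AZURE_CLIENT_ID")

	cred, err := confidential.NewCredFromSecret(os.Getenv("AZURE_CLIENT_SECRET"))
	if err != nil {
		log.Fatal(err)
	}

	client, err := confidential.New(
		authority,
		clientID,
		cred,
		confidential.WithAzureRegion(confidential.AutoDetectRegion()), // opt in to the regional STS
		confidential.WithHTTPClient(http.DefaultClient),               // any type with a Do method works
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```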
+func New(authority, clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := clientOptions{ + authority: authority, + // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache + disableInstanceDiscovery: cred.tokenProvider != nil, + httpClient: shared.DefaultClient, + } + for _, o := range options { + o(&opts) + } + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.accessor), + base.WithClientCapabilities(opts.capabilities), + base.WithInstanceDiscovery(!opts.disableInstanceDiscovery), + base.WithRegionDetection(opts.azureRegion), + base.WithX5C(opts.sendX5C), + } + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...) + if err != nil { + return Client{}, err + } + base.AuthParams.IsConfidentialClient = true + + return Client{base: base, cred: internalCred}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AuthCodeURLOption + options.CallOption +} { + return struct { + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
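Since the hint options above only surface as interface values, a small sketch may help: build an authorization URL with AuthCodeURL, then redeem the returned code with AcquireTokenByAuthCode (defined further below in this file). The redirect URI, scope, hint values, and helper names are placeholders.

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// loginURL is a hypothetical helper around AuthCodeURL using the hint options above.
func loginURL(ctx context.Context, client confidential.Client, clientID string) (string, error) {
	return client.AuthCodeURL(
		ctx,
		clientID,
		"http://localhost:8080/redirect",                  // placeholder redirect URI registered for the app
		[]string{"https://graph.microsoft.com/User.Read"}, // placeholder scope
		confidential.WithLoginHint("user@example.com"),    // pre-fill the username prompt
		confidential.WithDomainHint("example.com"),        // send the user straight to the federated IdP
	)
}

// redeem exchanges the code returned to the redirect URI for tokens using
// AcquireTokenByAuthCode, defined further below.
func redeem(ctx context.Context, client confidential.Client, code string) (string, error) {
	result, err := client.AcquireTokenByAuthCode(
		ctx,
		code,
		"http://localhost:8080/redirect", // must match the URI used in loginURL
		[]string{"https://graph.microsoft.com/User.Read"},
	)
	if err != nil {
		return "", err
	}
	return result.AccessToken, nil
}
```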
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByCredentialOptions: + t.claims = claims + case *acquireTokenOnBehalfOfOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *acquireTokenByCredentialOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByCredentialOption + AcquireOnBehalfOfOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByCredentialOptions: + t.tenantID = tenantID + case *acquireTokenOnBehalfOfOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string + authnScheme AuthenticationScheme +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. 
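As a concrete illustration of these per-request options, here is a hedged sketch of the common app-token pattern: consult the app cache via AcquireTokenSilent and fall back to AcquireTokenByCredential (both defined later in this file), overriding the tenant for just this request. The scope and helper name are placeholders.

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// appToken is a hypothetical helper: try the app cache first, then go to the
// STS, using WithTenantID to target a specific tenant for this one request.
func appToken(ctx context.Context, client confidential.Client, tenantID string) (string, error) {
	scopes := []string{"https://graph.microsoft.com/.default"} // placeholder scope

	result, err := client.AcquireTokenSilent(ctx, scopes, confidential.WithTenantID(tenantID))
	if err != nil {
		// Cache miss or unusable cached token: request a new one.
		result, err = client.AcquireTokenByCredential(ctx, scopes, confidential.WithTenantID(tenantID))
		if err != nil {
			return "", err
		}
	}
	return result.AccessToken, nil
}
```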
+func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. +// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + if o.claims != "" { + return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims") + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + RequestType: accesstokens.ATConfidential, + Credential: cca.cred, + IsAppCache: o.account.IsZero(), + TenantID: o.tenantID, + AuthnScheme: o.authnScheme, + } + + return cca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. 
+// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATConfidential, + Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return cca.base.AcquireTokenByAuthCode(ctx, params) +} + +// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential +type acquireTokenByCredentialOptions struct { + claims, tenantID string + authnScheme AuthenticationScheme +} + +// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential +type AcquireByCredentialOption interface { + acquireByCredOption() +} + +// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) { + o := acquireTokenByCredentialOptions{} + err := options.ApplyOptions(&o, opts) + if err != nil { + return AuthResult{}, err + } + authParams, err := cca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATClientCredentials + authParams.Claims = o.claims + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } + token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) + if err != nil { + return AuthResult{}, err + } + return cca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf +type acquireTokenOnBehalfOfOptions struct { + claims, tenantID string +} + +// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf +type AcquireOnBehalfOfOption interface { + acquireOBOOption() +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. +// +// Options: [WithClaims], [WithTenantID] +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) { + o := acquireTokenOnBehalfOfOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + params := base.AcquireTokenOnBehalfOfParameters{ + Scopes: scopes, + UserAssertion: userAssertion, + Claims: o.claims, + Credential: cca.cred, + TenantID: o.tenantID, + } + return cca.base.AcquireTokenOnBehalfOf(ctx, params) +} + +// Account gets the account in the token cache with the specified homeAccountID. +func (cca Client) Account(ctx context.Context, accountID string) (Account, error) { + return cca.base.Account(ctx, accountID) +} + +// RemoveAccount signs the account out and forgets account from token cache. 
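A short on-behalf-of sketch to go with AcquireTokenOnBehalfOf above; the downstream scope and the helper are hypothetical, and the user assertion is whatever access token the middle-tier API received from its caller.

```go
package example

import (
	"context"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

// downstreamToken exchanges the caller's assertion (the access token this API
// received) for a token to a downstream resource via the on-behalf-of flow.
func downstreamToken(ctx context.Context, client confidential.Client, userAssertion string) (string, error) {
	scopes := []string{"https://graph.microsoft.com/.default"} // placeholder downstream scope
	result, err := client.AcquireTokenOnBehalfOf(ctx, userAssertion, scopes)
	if err != nil {
		return "", err
	}
	return result.AccessToken, nil
}
```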
+func (cca Client) RemoveAccount(ctx context.Context, account Account) error { + return cca.base.RemoveAccount(ctx, account) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md new file mode 100644 index 000000000..7ef7862fe --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md @@ -0,0 +1,111 @@ +# MSAL Error Design + +Author: Abhidnya Patil(abhidnya.patil@microsoft.com) + +Contributors: + +- John Doak(jdoak@microsoft.com) +- Keegan Caruso(Keegan.Caruso@microsoft.com) +- Joel Hendrix(jhendrix@microsoft.com) + +## Background + +Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users. + +### Go error handling vs other MSAL languages + +Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program. + +Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do. + +### Go custom error types + +Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error". + +Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface: + +```go +type MyCustomErr struct { + Msg string +} +func (m MyCustomErr) Error() string { // This implements "error" + return m.Msg +} +``` + +### MSAL Error Goals + +- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations +- Detect errors that are transitory and can be retried +- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment + +## Implementing Client Side Errors + +Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible. + +These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was. + +## Implementing Service Side Errors + +Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error. + +These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors. + +The current implementation includes a specialized type that captures any error from the server: + +```go +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. 
+func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} +``` + +A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package: + +```go +var callErr CallErr +if errors.As(err, &callErr) { + ... +} +``` + +We provide a Verbose() function that can retrieve the most verbose message from any error we provide: + +```go +fmt.Println(errors.Verbose(err)) +``` + +If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors). + +CallErr is always thrown from the comm package (which handles all http requests) and looks similar to: + +```go +return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response + } +``` + +## Future Decisions + +The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it. + +If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait. + +Otherwise we will do this internally and retries will be left to us. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go new file mode 100644 index 000000000..c9b8dbed0 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package errors + +import ( + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/kylelemons/godebug/pretty" +) + +var prettyConf = &pretty.Config{ + IncludeUnexported: false, + SkipZeroFields: true, + TrackCycles: true, + Formatter: map[reflect.Type]interface{}{ + reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { + b, err := io.ReadAll(r) + if err != nil { + return "could not read io.Reader content" + } + return string(b) + }, + }, +} + +type verboser interface { + Verbose() string +} + +// Verbose prints the most verbose error that the error message has. +func Verbose(err error) string { + build := strings.Builder{} + for { + if err == nil { + break + } + if v, ok := err.(verboser); ok { + build.WriteString(v.Verbose()) + } else { + build.WriteString(err.Error()) + } + err = errors.Unwrap(err) + } + return build.String() +} + +// New is equivalent to errors.New(). +func New(text string) error { + return errors.New(text) +} + +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + // Resp contains response body + Resp *http.Response + Err error +} + +// Errors implements error.Error(). 
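To tie the error-handling design described in error_design.md above to code, here is a hedged sketch of how a caller might separate service-side CallErr values from client-side errors; the helper name and the throttling check are illustrative, not part of this package.

```go
package example

import (
	"fmt"
	"net/http"

	msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)

// describe distinguishes service-side errors (CallErr, carrying the HTTP
// request and response) from client-side errors, and surfaces the verbose
// diagnostics when the server was involved.
func describe(err error) string {
	var callErr msalerrors.CallErr
	if msalerrors.As(err, &callErr) {
		if callErr.Resp != nil && callErr.Resp.StatusCode == http.StatusTooManyRequests {
			return "service-side error (throttled, consider retrying later):\n" + msalerrors.Verbose(err)
		}
		return "service-side error:\n" + msalerrors.Verbose(err)
	}
	// Anything else is a client-side (configuration/argument) problem; retrying won't help.
	return fmt.Sprintf("client-side error: %v", err)
}
```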
+func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} + +// Is reports whether any error in errors chain matches target. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in errors chain that matches target, +// and if so, sets target to that error value and returns true. +// Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go new file mode 100644 index 000000000..09a0d92f5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -0,0 +1,477 @@ +// Package base contains a "Base" client that is used by the external public.Client and confidential.Client. +// Base holds shared attributes that must be available to both clients and methods that act as +// shared calls. +package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In production it's a *storage.Manager or *storage.PartitionedManager. +type manager interface { + cache.Serializer + Read(context.Context, authority.AuthParams) (storage.TokenResponse, error) + Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error) +} + +// accountManager is a manager that also caches accounts. In production it's a *storage.Manager. +type accountManager interface { + manager + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + TenantID string + UserAssertion string + AuthorizationType authority.AuthorizeType + Claims string + AuthnScheme authority.AuthenticationScheme +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. 
+// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + Claims string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential + TenantID string +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Claims string + Credential *accesstokens.Credential + TenantID string + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). +func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { + if err := storageTokenResponse.AccessToken.Validate(); err != nil { + return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) + } + + account := storageTokenResponse.Account + accessToken := storageTokenResponse.AccessToken.Secret + grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) + + // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications. + var idToken accesstokens.IDToken + if !storageTokenResponse.IDToken.IsZero() { + err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret)) + if err != nil { + return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) + } + } + return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil +} + +// NewAuthResult creates an AuthResult. +func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) { + if len(tokenResponse.DeclinedScopes) > 0 { + return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ",")) + } + return AuthResult{ + Account: account, + IDToken: tokenResponse.IDToken, + AccessToken: tokenResponse.AccessToken, + ExpiresOn: tokenResponse.ExpiresOn.T, + GrantedScopes: tokenResponse.GrantedScopes.Slice, + }, nil +} + +// Client is a base client that provides access to common methods and primatives that +// can be used by multiple clients. +type Client struct { + Token *oauth.Client + manager accountManager // *storage.Manager or fakeManager in tests + // pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests + pmanager manager + + AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). + cacheAccessor cache.ExportReplace + cacheAccessorMu *sync.RWMutex +} + +// Option is an optional argument to the New constructor. +type Option func(c *Client) error + +// WithCacheAccessor allows you to set some type of cache for storing authentication tokens. 
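WithCacheAccessor (below) only takes effect when something implements cache.ExportReplace, so here is a hedged sketch of a file-backed accessor. The type, the file handling, and the exact cache.Unmarshaler/Marshaler/hint signatures are assumptions inferred from how Replace and Export are invoked elsewhere in this file.

```go
package example

import (
	"context"
	"os"
	"sync"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
)

// fileAccessor is a hypothetical cache.ExportReplace implementation that
// persists the serialized cache contract to a single file. It ignores the
// partition key hint and is only a sketch of the accessor contract.
type fileAccessor struct {
	mu   sync.Mutex
	path string
}

// Replace loads persisted cache data into MSAL's in-memory cache before a read.
func (f *fileAccessor) Replace(ctx context.Context, c cache.Unmarshaler, _ cache.ReplaceHints) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	data, err := os.ReadFile(f.path)
	if os.IsNotExist(err) {
		return nil // nothing persisted yet
	}
	if err != nil {
		return err
	}
	return c.Unmarshal(data)
}

// Export writes MSAL's in-memory cache back out after a token is stored.
func (f *fileAccessor) Export(ctx context.Context, c cache.Marshaler, _ cache.ExportHints) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	data, err := c.Marshal()
	if err != nil {
		return err
	}
	return os.WriteFile(f.path, data, 0o600)
}
```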
+func WithCacheAccessor(ca cache.ExportReplace) Option { + return func(c *Client) error { + if ca != nil { + c.cacheAccessor = ca + } + return nil + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(c *Client) error { + var err error + if len(capabilities) > 0 { + cc, err := authority.NewClientCapabilities(capabilities) + if err == nil { + c.AuthParams.Capabilities = cc + } + } + return err + } +} + +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) error { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + return nil + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C(sendX5C bool) Option { + return func(c *Client) error { + c.AuthParams.SendX5C = sendX5C + return nil + } +} + +func WithRegionDetection(region string) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.Region = region + return nil + } +} + +func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option { + return func(c *Client) error { + c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled + c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled + return nil + } +} + +// New is the constructor for Base. +func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { + //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false) + if err != nil { + return Client{}, err + } + authParams := authority.NewAuthParams(clientID, authInfo) + client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go + Token: token, + AuthParams: authParams, + cacheAccessorMu: &sync.RWMutex{}, + manager: storage.New(token), + pmanager: storage.NewPartitionedManager(token), + } + for _, o := range options { + if err = o(&client); err != nil { + break + } + } + return client, err + +} + +// AuthCodeURL creates a URL used to acquire an authorization code. 
+func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) { + endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "") + if err != nil { + return "", err + } + + baseURL, err := url.Parse(endpoints.AuthorizationEndpoint) + if err != nil { + return "", err + } + + claims, err := authParams.MergeCapabilitiesAndClaims() + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("client_id", clientID) + v.Add("response_type", "code") + v.Add("redirect_uri", redirectURI) + v.Add("scope", strings.Join(scopes, scopeSeparator)) + if authParams.State != "" { + v.Add("state", authParams.State) + } + if claims != "" { + v.Add("claims", claims) + } + if authParams.CodeChallenge != "" { + v.Add("code_challenge", authParams.CodeChallenge) + } + if authParams.CodeChallengeMethod != "" { + v.Add("code_challenge_method", authParams.CodeChallengeMethod) + } + if authParams.LoginHint != "" { + v.Add("login_hint", authParams.LoginHint) + } + if authParams.Prompt != "" { + v.Add("prompt", authParams.Prompt) + } + if authParams.DomainHint != "" { + v.Add("domain_hint", authParams.DomainHint) + } + // There were left over from an implementation that didn't use any of these. We may + // need to add them later, but as of now aren't needed. + /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + ar := AuthResult{} + // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant + tenant := silent.TenantID + authParams, err := b.AuthParams.WithTenant(tenant) + if err != nil { + return ar, err + } + authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.Claims = silent.Claims + authParams.UserAssertion = silent.UserAssertion + if silent.AuthnScheme != nil { + authParams.AuthnScheme = silent.AuthnScheme + } + + m := b.pmanager + if authParams.AuthorizationType != authority.ATOnBehalfOf { + authParams.AuthorizationType = authority.ATRefreshToken + m = b.manager + } + if b.cacheAccessor != nil { + key := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessorMu.RLock() + err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + b.cacheAccessorMu.RUnlock() + } + if err != nil { + return ar, err + } + storageTokenResponse, err := m.Read(ctx, authParams) + if err != nil { + return ar, err + } + + // ignore cached access tokens when given claims + if silent.Claims == "" { + ar, err = AuthResultFromStorage(storageTokenResponse) + if err == nil { + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err + } + } + + // redeem a cached refresh token, if available + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return ar, errors.New("no token found") + } + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return ar, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams 
AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Claims = authCodeParams.Claims + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + var ar AuthResult + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + TenantID: onBehalfOfParams.TenantID, + Claims: onBehalfOfParams.Claims, + } + ar, err := b.AcquireTokenSilent(ctx, silentParameters) + if err == nil { + return ar, err + } + authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID) + if err != nil { + return AuthResult{}, err + } + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.Claims = onBehalfOfParams.Claims + authParams.Scopes = onBehalfOfParams.Scopes + authParams.UserAssertion = onBehalfOfParams.UserAssertion + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err == nil { + ar, err = b.AuthResultFromToken(ctx, authParams, token, true) + } + return ar, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + var m manager = b.manager + if authParams.AuthorizationType == authority.ATOnBehalfOf { + m = b.pmanager + } + key := token.CacheKey(authParams) + if b.cacheAccessor != nil { + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return AuthResult{}, err + } + } + account, err := m.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + ar, err := NewAuthResult(token, account) + if err == nil && b.cacheAccessor != nil { + err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) + } + if err != nil { + return AuthResult{}, err + } + + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err +} + +func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return nil, err + } + } + return b.manager.AllAccounts(), nil +} + +func (b Client) Account(ctx 
context.Context, homeAccountID string) (shared.Account, error) { + if b.cacheAccessor != nil { + b.cacheAccessorMu.RLock() + defer b.cacheAccessorMu.RUnlock() + authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer. + authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return shared.Account{}, err + } + } + return b.manager.Account(homeAccountID), nil +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. +func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error { + if b.cacheAccessor == nil { + b.manager.RemoveAccount(account, b.AuthParams.ClientID) + return nil + } + b.cacheAccessorMu.Lock() + defer b.cacheAccessorMu.Unlock() + key := b.AuthParams.CacheKey(false) + err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key}) + if err != nil { + return err + } + b.manager.RemoveAccount(account, b.AuthParams.ClientID) + return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go new file mode 100644 index 000000000..f9be90276 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -0,0 +1,213 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type Contract struct { + AccessTokens map[string]AccessToken `json:"AccessToken,omitempty"` + RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"` + IDTokens map[string]IDToken `json:"IdToken,omitempty"` + Accounts map[string]shared.Account `json:"Account,omitempty"` + AppMetaData map[string]AppMetaData `json:"AppMetadata,omitempty"` + + AdditionalFields map[string]interface{} +} + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type InMemoryContract struct { + AccessTokensPartition map[string]map[string]AccessToken + RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken + IDTokensPartition map[string]map[string]IDToken + AccountsPartition map[string]map[string]shared.Account + AppMetaData map[string]AppMetaData +} + +// NewContract is the constructor for Contract. 
+func NewInMemoryContract() *InMemoryContract { + return &InMemoryContract{ + AccessTokensPartition: map[string]map[string]AccessToken{}, + RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{}, + IDTokensPartition: map[string]map[string]IDToken{}, + AccountsPartition: map[string]map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + } +} + +// NewContract is the constructor for Contract. +func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. +type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + TokenType string `json:"token_type,omitempty"` + AuthnSchemeKeyID string `json:"keyid,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + TokenType: tokenType, + AuthnSchemeKeyID: authnSchemeKeyID, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + key := strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) + // add token type to key for new access tokens types. skip for bearer token type to + // preserve fwd and back compat between a common cache and msal clients + if !strings.EqualFold(a.TokenType, authority.AccessTokenTypeBearer) { + key = strings.Join([]string{key, a.TokenType}, shared.CacheKeySeparator) + } + return strings.ToLower(key) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. 
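To make the cache key layout above concrete, a small illustrative mirror of AccessToken.Key(): the "-" separator stands in for shared.CacheKeySeparator and the literal "Bearer" for authority.AccessTokenTypeBearer, both assumptions since those values are defined in other files.

```go
package example

import "strings"

// accessTokenKey mirrors AccessToken.Key() above, for illustration only:
// home account ID, environment, credential type, client ID, realm (tenant),
// and the space-separated scope string, joined and lower-cased.
func accessTokenKey(homeAccountID, env, clientID, realm, scopes, tokenType string) string {
	key := strings.Join(
		[]string{homeAccountID, env, "AccessToken", clientID, realm, scopes},
		"-", // assumed value of shared.CacheKeySeparator
	)
	// Non-Bearer token types (e.g. proof-of-possession) are appended so they
	// don't collide with Bearer entries written by older clients.
	if !strings.EqualFold(tokenType, "Bearer") {
		key = strings.Join([]string{key, tokenType}, "-")
	}
	return strings.ToLower(key)
}
```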
+func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. +func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (id IDToken) Key() string { + key := strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) + return strings.ToLower(key) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + key := strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) + return strings.ToLower(key) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 000000000..c09318330 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,442 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. +func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest, tokenType, authnSchemeKeyID) + if err == nil { + tr.AccessToken = accessToken + } + idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err == nil { + tr.Account = account + } + return tr, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. 
+func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. + if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + 
metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey, tokenType, authnSchemeKeyID string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. + for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // 
TODO(keegan): All the tests here pass, but Bogdan says this is
+    // more complicated. I'm opening an issue for this to have him
+    // review the tests and suggest tests that would break this so
+    // we can re-write against good tests. His comments follow:
+    // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
+    // The algorithm is:
+    // If the application is NOT part of the family, search by client_ID
+    // If the app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
+    // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+    m.contractMu.RLock()
+    defer m.contractMu.RUnlock()
+    for _, matcher := range matchers {
+        for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
+            if matcher(rt) {
+                return rt, nil
+            }
+        }
+    }
+
+    return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
+    m.contractMu.Lock()
+    defer m.contractMu.Unlock()
+    key := refreshToken.Key()
+    if m.contract.RefreshTokensPartition[partitionKey] == nil {
+        m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
+    }
+    m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
+    return nil
+}
+
+func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
+    m.contractMu.RLock()
+    defer m.contractMu.RUnlock()
+    for _, idt := range m.contract.IDTokensPartition[partitionKey] {
+        if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
+            if checkAlias(idt.Environment, envAliases) {
+                return idt, nil
+            }
+        }
+    }
+    return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
+    key := idToken.Key()
+    m.contractMu.Lock()
+    defer m.contractMu.Unlock()
+    if m.contract.IDTokensPartition[partitionKey] == nil {
+        m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
+    }
+    m.contract.IDTokensPartition[partitionKey][key] = idToken
+    return nil
+}
+
+func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
+    m.contractMu.RLock()
+    defer m.contractMu.RUnlock()
+
+    // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+    // We only use a map because the storage contract shared between all language implementations says use a map.
+    // We can't change that. The other reason is that the keys are made using a specific "env", but here we are allowing
+    // a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
+    // or just statically check. Since the design is to have a storage.Manager per user, the number of keys stored
+    // is really low (say 2). Each hash is more expensive than the entire iteration.
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 000000000..2221e60c4 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,583 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// needsUpgrade returns true if the given key follows the v1.0 schema i.e., +// it contains an uppercase character (v1.1+ keys are all lowercase) +func needsUpgrade(key string) bool { + for _, r := range key { + if 'A' <= r && r <= 'Z' { + return true + } + } + return false +} + +// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting +// the v1.0 item. Callers must hold an exclusive lock on m. +func upgrade[T any](m map[string]T, k string) T { + v1_1Key := strings.ToLower(k) + v, ok := m[k] + if !ok { + // another goroutine did the upgrade while this one was waiting for the write lock + return m[v1_1Key] + } + if v2, ok := m[v1_1Key]; ok { + // cache has an equivalent v1.1+ item, which we prefer because we know it was added + // by a newer version of the module and is therefore more likely to remain valid. + // The v1.0 item may have expired because only v1.0 or earlier would update it. + v = v2 + } else { + // add an equivalent item according to the v1.1 schema + m[v1_1Key] = v + } + delete(m, k) + return v +} + +// Read reads a storage token from the cache if it exists. 
+func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + tr := TokenResponse{} + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() + + // fetch metadata if instanceDiscovery is enabled + aliases := []string{authParameters.AuthorityInfo.Host} + if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID) + tr.AccessToken = accessToken + + if homeAccountID == "" { + // caller didn't specify a user, so there's no reason to search for an ID or refresh token + return tr, nil + } + // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating + // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err == nil { + tr.IDToken = idToken + } + + if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil { + // we need the family ID to identify the correct refresh token, if any + familyID := appMetadata.FamilyID + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err == nil { + tr.RefreshToken = refreshToken + } + } + + account, err := m.readAccount(homeAccountID, aliases, realm) + if err == nil { + tr.Account = account + } + return tr, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + homeAccountID := tokenResponse.HomeAccountID() + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + cachedAt := time.Now() + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, + ) + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + preferredUsername := idTokenJwt.UPN + if idTokenJwt.PreferredUsername != "" { + preferredUsername = idTokenJwt.PreferredUsername + } + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + preferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken { + m.contractMu.RLock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for k, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { + if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + at = upgrade(m.contract.AccessTokens, k) + } + return at + } + } + } + } + m.contractMu.RUnlock() + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + for _, matcher := range matchers { + for k, rt := range m.contract.RefreshTokens { + if matcher(rt) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + rt = upgrade(m.contract.RefreshTokens, k) + } + return rt, nil + } + } + } + + m.contractMu.RUnlock() + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + for k, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + idt = upgrade(m.contract.IDTokens, k) + } + return idt, nil + } + } + } + m.contractMu.RUnlock() + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for k, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + acc = upgrade(m.contract.Accounts, k) + } + return acc, nil + } + } + m.contractMu.RUnlock() + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + for k, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + app = upgrade(m.contract.AppMetaData, k) + } + return app, nil + } + } + m.contractMu.RUnlock() + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. 
+ if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *Manager) Marshal() ([]byte, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 000000000..7b673e3fe --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. + TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md new file mode 100644 index 000000000..09edb01b7 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md @@ -0,0 +1,140 @@ +# JSON Package Design +Author: John Doak(jdoak@microsoft.com) + +## Why? + +This project needs a special type of marshal/unmarshal not directly supported +by the encoding/json package. + +The need revolves around a few key wants/needs: +- unmarshal and marshal structs representing JSON messages +- fields in the messgage not in the struct must be maintained when unmarshalled +- those same fields must be marshalled back when encoded again + +The initial version used map[string]interface{} to put in the keys that +were known and then any other keys were put into a field called AdditionalFields. 
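+
+As a rough sketch (the field names here are hypothetical, not taken from this
+module), the kind of type this package is built around looks like:
+
+```go
+// Message models one JSON message. Known keys map onto struct fields, while
+// any unknown keys must survive an Unmarshal/Marshal round trip by being
+// carried in AdditionalFields.
+type Message struct {
+    Name string `json:"name"`
+    Age  int    `json:"age"`
+
+    // AdditionalFields holds every JSON key that has no matching struct field.
+    AdditionalFields map[string]interface{}
+}
+```
+
+The initial, map-based implementation provided that round trip by marshalling
+and unmarshalling through an intermediate map[string]interface{}.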
+
+This has a few negatives:
+- Dual marshalling/unmarshalling is required
+- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
+- Tests can quickly become disconnected if those keys aren't put
+in tests as well. So you think you have support working, but you
+don't. Existing tests were found that didn't test the marshalling output.
+- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
+that don't have custom marshal/unmarshal.
+
+This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
+
+This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
+- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
+
+Go proverb: Reflection is never clear
+Suggested reading: https://blog.golang.org/laws-of-reflection
+
+## Important design decisions
+
+- We don't want to understand all JSON decoding rules
+- We don't want to deal with all the quoting, commas, etc. on decode
+- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
+- If a struct does not implement json.Unmarshaler, it must have AdditionalFields defined
+- We only support root level objects that are \*struct or struct
+
+To facilitate these goals, we will utilize the json.Encoder and json.Decoder.
+They provide streaming processing (efficient) and return errors on bad JSON.
+
+Support for json.Marshaler/Unmarshaler allows us to use non-basic types
+that must be specially encoded/decoded (like time.Time objects).
+
+We don't support types that lack custom unmarshalling or an AdditionalFields field,
+in order to prevent future devs from forgetting that important field and
+generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed above.
+Anything outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on json supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
+
+## Design
+
+## State Machines
+
+This uses state machine designs that are based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
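+
+For example, a minimal machine built on that type could look like the
+following (purely illustrative; the real machines in this package follow the
+same shape but do JSON work in each state):
+
+```go
+// countdown is a toy state machine that counts n down to zero.
+type countdown struct {
+    n int
+}
+
+// run drives the machine until a state returns a nil next state or an error.
+func (c *countdown) run() error {
+    state := stateFn(c.start)
+    for state != nil {
+        next, err := state()
+        if err != nil {
+            return err
+        }
+        state = next
+    }
+    return nil
+}
+
+// start is the first state; it simply hands off to step.
+func (c *countdown) start() (stateFn, error) {
+    return c.step, nil
+}
+
+// step decrements n and re-enters itself until n reaches zero.
+func (c *countdown) step() (stateFn, error) {
+    if c.n <= 0 {
+        return nil, nil // done
+    }
+    c.n--
+    return c.step, nil
+}
+```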
+
+Our state machines have a few standard calls:
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or an error
+* stateFn is called
+  - If the returned stateFn (next state) is non-nil, call it
+  - If the error is non-nil, run() returns the error
+  - If stateFn == nil and err == nil, run() returns a nil error
+
+## Supporting types
+
+Marshalling/Unmarshalling must support (within the top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+## Marshalling
+
+The marshalling design is based around a state machine.
+
+The basic logic is as follows:
+
+* If the struct has a custom marshaller, call it and return
+* If the struct has a field "AdditionalFields", it must be a map[string]interface{}
+* If the struct does not have "AdditionalFields", give an error
+* Get the struct tags mapping json names to go names, create a mapping
+* For each public field name
+  - Write the field name out
+  - If the field value is a struct, recursively call our state machine
+  - Otherwise, use the json.Encoder to write out the value
+
+## Unmarshalling
+
+The unmarshalling design is also based around a state machine. The
+basic logic is as follows:
+
+* If the struct has a custom unmarshaller, call it
+* If the struct has a field "AdditionalFields", it must be a map[string]interface{}
+* Get the struct tags mapping json names to go names, create a mapping
+* For each key found
+  - If the key exists in the struct,
+    - If the value is a basic type, extract the value into the struct field using the Decoder
+    - If the value is a struct type, recursively call the state machine
+  - If the key doesn't exist in the struct, add it to AdditionalFields (if it exists) using the Decoder
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
new file mode 100644
index 000000000..2238521f5
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -0,0 +1,184 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package json provides functions for marshalling and unmarshalling types to JSON. These functions are meant to
+// be utilized inside of structs that implement the json.Unmarshaler and json.Marshaler interfaces.
+// This package provides the additional functionality of writing fields that are not in the struct when marshalling
+// to a field called AdditionalFields if that field exists and is a map[string]interface{}.
+// When marshalling, if the struct has all the same prerequisites, it will use the keys in AdditionalFields as
+// extra fields. This package uses encoding/json underneath.
+package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +const addField = "AdditionalFields" +const ( + marshalJSON = "MarshalJSON" + unmarshalJSON = "UnmarshalJSON" +) + +var ( + leftBrace = []byte("{")[0] + rightBrace = []byte("}")[0] + comma = []byte(",")[0] + leftParen = []byte("[")[0] + rightParen = []byte("]")[0] +) + +var mapStrInterType = reflect.TypeOf(map[string]interface{}{}) + +// stateFn defines a state machine function. This will be used in all state +// machines in this package. +type stateFn func() (stateFn, error) + +// Marshal is used to marshal a type into its JSON representation. It +// wraps the stdlib calls in order to marshal a struct or *struct so +// that a field called "AdditionalFields" of type map[string]interface{} +// with "-" used inside struct tag `json:"-"` can be marshalled as if +// they were fields within the struct. +func Marshal(i interface{}) ([]byte, error) { + buff := bytes.Buffer{} + enc := json.NewEncoder(&buff) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + err := marshalStruct(v, &buff, enc) + if err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has +// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct +// will be written as key/value pairs to AdditionalFields. +func Unmarshal(b []byte, i interface{}) error { + if len(b) == 0 { + return nil + } + + jdec := json.NewDecoder(bytes.NewBuffer(b)) + jdec.UseNumber() + return unmarshalStruct(jdec, i) +} + +// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled, +// this will panic. This is exposed to help test AdditionalField values +// which are stored as json.RawMessage. +func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. 
+func callMarshalJSON(v reflect.Value) ([]byte, error) {
+    if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+        marsh := v.Interface().(json.Marshaler)
+        return marsh.MarshalJSON()
+    }
+
+    if v.Kind() == reflect.Ptr {
+        v = v.Elem()
+    } else {
+        if v.CanAddr() {
+            v = v.Addr()
+        }
+    }
+
+    if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+        marsh := v.Interface().(json.Marshaler)
+        return marsh.MarshalJSON()
+    }
+
+    panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
+}
+
+// hasUnmarshalJSON will determine if the value or a pointer to this value has
+// the UnmarshalJSON method.
+func hasUnmarshalJSON(v reflect.Value) bool {
+    // You can't unmarshal on a non-pointer type.
+    if v.Kind() != reflect.Ptr {
+        if !v.CanAddr() {
+            return false
+        }
+        v = v.Addr()
+    }
+
+    if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
+        _, ok := v.Interface().(json.Unmarshaler)
+        return ok
+    }
+
+    return false
+}
+
+// hasOmitEmpty reports whether the field's json tag includes "omitempty",
+// meaning the field should not be output when it holds its zero value. tag is
+// the string returned by reflect.StructField.Tag().Get().
+func hasOmitEmpty(tag string) bool {
+    sl := strings.Split(tag, ",")
+    for _, str := range sl {
+        if str == "omitempty" {
+            return true
+        }
+    }
+    return false
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
new file mode 100644
index 000000000..cef442f25
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
@@ -0,0 +1,333 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package json
+
+import (
+    "encoding/json"
+    "fmt"
+    "reflect"
+)
+
+// unmarshalMap unmarshals a map.
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error {
+    if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map {
+        panic("unmarshalMap called on non-*map value")
+    }
+    mapValueType := m.Elem().Type().Elem()
+    walk := mapWalk{dec: dec, m: m, valueType: mapValueType}
+    if err := walk.run(); err != nil {
+        return err
+    }
+    return nil
+}
+
+type mapWalk struct {
+    dec       *json.Decoder
+    key       string
+    m         reflect.Value
+    valueType reflect.Type
+}
+
+// run runs our decoder state machine.
+func (m *mapWalk) run() error {
+    var state = m.start
+    var err error
+    for {
+        state, err = state()
+        if err != nil {
+            return err
+        }
+        if state == nil {
+            return nil
+        }
+    }
+}
+
+func (m *mapWalk) start() (stateFn, error) {
+    // maps can have custom unmarshalers.
+    if hasUnmarshalJSON(m.m) {
+        err := m.dec.Decode(m.m.Interface())
+        if err != nil {
+            return nil, err
+        }
+        return nil, nil
+    }
+
+    // We only want to use this if the map value is:
+    // *struct/struct/map/slice
+    // otherwise use standard decode
+    t, _ := m.valueBaseType()
+    switch t.Kind() {
+    case reflect.Struct, reflect.Map, reflect.Slice:
+        delim, err := m.dec.Token()
+        if err != nil {
+            return nil, err
+        }
+        // This indicates the value was set to JSON null.
+        if delim == nil {
+            return nil, nil
+        }
+        if !delimIs(delim, '{') {
+            return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
+        }
+        return m.next, nil
+    case reflect.Ptr:
+        return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference'")
+    }
+
+    // This is a basic map type, so just use Decode().
+ if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. 
+ if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 000000000..df5dc6e11 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. 
+ if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). + case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. 
+ if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 000000000..07751544a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. + return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. 
+func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. + if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. 
+func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
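A practical consequence of `findFields` plus `storeAdditional` above is that JSON keys with no matching struct field are preserved in `AdditionalFields` instead of being dropped. A rough standalone sketch of the same idea using only `encoding/json` (the type and field names are illustrative, not the vendored API):

```
package main

import (
	"encoding/json"
	"fmt"
)

// payload keeps known keys as typed fields and everything else as raw JSON,
// loosely mirroring the AdditionalFields convention used above.
type payload struct {
	Name             string                     `json:"name"`
	AdditionalFields map[string]json.RawMessage `json:"-"`
}

func (p *payload) UnmarshalJSON(b []byte) error {
	type alias payload // avoid recursing into this method
	if err := json.Unmarshal(b, (*alias)(p)); err != nil {
		return err
	}
	all := map[string]json.RawMessage{}
	if err := json.Unmarshal(b, &all); err != nil {
		return err
	}
	delete(all, "name") // keep only the keys we did not model
	p.AdditionalFields = all
	return nil
}

func main() {
	var p payload
	_ = json.Unmarshal([]byte(`{"name":"x","extra":1}`), &p)
	fmt.Println(p.Name, string(p.AdditionalFields["extra"])) // x 1
}
```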
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 000000000..a1c99621e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 000000000..04236ff31 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
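The `Unix` and `DurationTime` types in the json/types/time package above exist to turn the wire formats the token endpoint uses (a quoted unix epoch, and an `expires_in` count of seconds from now) into `time.Time` values. A minimal standalone sketch of the `expires_in` conversion, assuming an unquoted number (the field and type names are illustrative):

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// expiry turns an "expires_in" style seconds-from-now value into an absolute time,
// in the spirit of DurationTime above.
type expiry struct{ T time.Time }

func (e *expiry) UnmarshalJSON(b []byte) error {
	var secs int64
	if err := json.Unmarshal(b, &secs); err != nil {
		return err
	}
	e.T = time.Now().Add(time.Duration(secs) * time.Second)
	return nil
}

func main() {
	var resp struct {
		ExpiresIn expiry `json:"expires_in"`
	}
	_ = json.Unmarshal([]byte(`{"expires_in": 3600}`), &resp)
	fmt.Println("token expires around:", resp.ExpiresIn.T.Round(time.Minute))
}
```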
+package local
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var okPage = []byte(`
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Complete</title>
+</head>
+<body>
+    <p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body>
+</html>
+`)
+
+const failPage = `
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8" />
+    <title>Authentication Failed</title>
+</head>
+<body>
+    <p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+    <p>Error details: error %s error_description: %s</p>
+</body>
+</html>
+`
+
+// Result is the result from the redirect.
+type Result struct {
+	// Code is the code sent by the authority server.
+	Code string
+	// Err is set if there was an error.
+	Err error
+}
+
+// Server is an HTTP server.
+type Server struct {
+	// Addr is the address the server is listening on.
+	Addr     string
+	resultCh chan Result
+	s        *http.Server
+	reqState string
+}
+
+// New creates a local HTTP server and starts it.
+func New(reqState string, port int) (*Server, error) {
+	var l net.Listener
+	var err error
+	var portStr string
+	if port > 0 {
+		// use port provided by caller
+		l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
+		portStr = strconv.FormatInt(int64(port), 10)
+	} else {
+		// find a free port
+		for i := 0; i < 10; i++ {
+			l, err = net.Listen("tcp", "localhost:0")
+			if err != nil {
+				continue
+			}
+			addr := l.Addr().String()
+			portStr = addr[strings.LastIndex(addr, ":")+1:]
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	serv := &Server{
+		Addr:     fmt.Sprintf("http://localhost:%s", portStr),
+		s:        &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second},
+		reqState: reqState,
+		resultCh: make(chan Result, 1),
+	}
+	serv.s.Handler = http.HandlerFunc(serv.handler)
+
+	if err := serv.start(l); err != nil {
+		return nil, err
+	}
+
+	return serv, nil
+}
+
+func (s *Server) start(l net.Listener) error {
+	go func() {
+		err := s.s.Serve(l)
+		if err != nil {
+			select {
+			case s.resultCh <- Result{Err: err}:
+			default:
+			}
+		}
+	}()
+
+	return nil
+}
+
+// Result gets the result of the redirect operation. Once a single result is returned, the server
+// is shutdown. ctx deadline will be honored.
+func (s *Server) Result(ctx context.Context) Result {
+	select {
+	case <-ctx.Done():
+		return Result{Err: ctx.Err()}
+	case r := <-s.resultCh:
+		return r
+	}
+}
+
+// Shutdown shuts down the server.
+func (s *Server) Shutdown() {
+	// Note: You might get clever and think you can do this in handler() as a defer, you can't.
+	_ = s.s.Shutdown(context.Background())
+}
+
+func (s *Server) putResult(r Result) {
+	select {
+	case s.resultCh <- r:
+	default:
+	}
+}
+
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+	q := r.URL.Query()
+
+	headerErr := q.Get("error")
+	if headerErr != "" {
+		desc := q.Get("error_description")
+		// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
+		// change this to s.error() and make s.error() write the failPage instead of an error code.
+		_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
+		s.putResult(Result{Err: fmt.Errorf(desc)})
+		return
+	}
+
+	respState := q.Get("state")
+	switch respState {
+	case s.reqState:
+	case "":
+		s.error(w, http.StatusInternalServerError, "server didn't send OAuth state")
+		return
+	default:
+		s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState)
+		return
+	}
+
+	code := q.Get("code")
+	if code == "" {
+		s.error(w, http.StatusInternalServerError, "authorization code missing in query string")
+		return
+	}
+
+	_, _ = w.Write(okPage)
+	s.putResult(Result{Code: code})
+}
+
+func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) {
+	err := fmt.Errorf(str, i...)
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 000000000..ef8d908a4 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,354 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. 
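The `local` server in the previous file is what makes interactive auth work: it binds to localhost, waits for the authority to redirect back with either `code` or `error` query parameters, and hands exactly one outcome to the caller over a buffered channel. A compact sketch of that shape under the same assumptions (port 0 for a free port; messages and names are illustrative):

```
package main

import (
	"fmt"
	"net"
	"net/http"
)

type result struct {
	code string
	err  error
}

func main() {
	// Listen on a free localhost port, like the port==0 path in New() above.
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	results := make(chan result, 1) // buffered so a single redirect never blocks the handler

	srv := &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		if e := q.Get("error"); e != "" {
			results <- result{err: fmt.Errorf("%s: %s", e, q.Get("error_description"))}
			return
		}
		fmt.Fprintln(w, "Authentication complete. You can close this tab.")
		results <- result{code: q.Get("code")}
	})}
	go func() { _ = srv.Serve(l) }()

	fmt.Println("redirect URI:", "http://"+l.Addr().String())
	// A real flow would now open the browser at the authorization URL and wait:
	//   r := <-results
	_ = srv.Close()
}
```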
+type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. +type FetchWSTrust interface { + Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) + SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error) +} + +// Client provides tokens for various types of token requests. +type Client struct { + Resolver ResolveEndpointer + AccessTokens AccessTokens + Authority FetchAuthority + WSTrust FetchWSTrust +} + +// New is the constructor for Token. +func New(httpClient ops.HTTPClient) *Client { + r := ops.New(httpClient) + return &Client{ + Resolver: newAuthorityEndpoint(r), + AccessTokens: r.AccessTokens(), + Authority: r.Authority(), + WSTrust: r.WSTrust(), + } +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName) +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). +func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) { + return t.Authority.AADInstanceDiscovery(ctx, authorityInfo) +} + +// AuthCode returns a token based on an authorization code. +func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) { + if err := scopeError(req.AuthParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tResp, err := t.AccessTokens.FromAuthCode(ctx, req) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err) + } + return tResp, nil +} + +// Credential acquires a token from the authority using a client credentials grant. 
+func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + Claims: authParams.Claims, + CorrelationID: uuid.New().String(), + Scopes: scopes, + TenantID: authParams.AuthorityInfo.Tenant, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + if len(scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + TokenType: authParams.AuthnScheme.AccessTokenType(), + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromAssertion(ctx, authParams, jwt) +} + +// Credential acquires a token from the authority using a client credentials grant. +func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. 
+func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if err := scopeError(authParams); err != nil { + return accesstokens.TokenResponse{}, err + } + + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + return accesstokens.TokenResponse{}, err + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + err = fmt.Errorf("problem getting SAML token info: %w", err) + return accesstokens.TokenResponse{}, err + } + tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return tr, nil + case authority.Managed: + if len(authParams.Scopes) == 0 { + err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. +func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. 
DeviceCode is not valid") + } + + var cancel context.CancelFunc + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := scopeError(authParams); err != nil { + return DeviceCode{}, err + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} + +// scopeError takes an authority.AuthParams and returns an error +// if len(AuthParams.Scope) == 0. +func scopeError(a authority.AuthParams) error { + // TODO(someone): we could look deeper at the message to determine if + // it's a scope error, but this is a good start. + /* + {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope + openid offline_access profile is not valid. 
Client credential flows must have a scope value + with /.default suffixed to the resource identifier (application ID URI)...} + */ + if len(a.Scopes) == 0 { + return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid") + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 000000000..a7b7b0742 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,457 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v5" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. 
This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. +func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. 
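`Credential.JWT` above builds the client assertion used for certificate-based auth: an RS256-signed JWT whose aud/iss/sub/jti/nbf/exp claims identify the client and the token endpoint, optionally carrying the `x5c` chain for SN/I. A small sketch of that signing step with `github.com/golang-jwt/jwt/v5` and a throwaway RSA key (the client ID and endpoint values are placeholders, not real configuration):

```
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
	"github.com/google/uuid"
)

func main() {
	// Stand-in for the certificate's private key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	now := time.Now()
	tok := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"aud": "https://login.microsoftonline.com/tenant/oauth2/v2.0/token", // placeholder token endpoint
		"iss": "client-id",                                                  // placeholder client ID
		"sub": "client-id",
		"jti": uuid.New().String(),
		"nbf": now.Unix(),
		"exp": now.Add(10 * time.Minute).Unix(),
	})

	assertion, err := tok.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println("client_assertion:", assertion[:32], "...")
}
```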
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. +func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) { + var qv url.Values + + switch req.AppType { + case ATUnknown: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown") + case ATConfidential: + var err error + if req.Credential == nil { + return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") + } + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) + if err != nil { + return TokenResponse{}, err + } + case ATPublic: + qv = url.Values{} + default: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType) + } + + qv.Set(grantType, grant.AuthCode) + qv.Set("code", req.Code) + qv.Set("code_verifier", req.CodeChallenge) + qv.Set("redirect_uri", req.AuthParams.Redirecturi) + qv.Set(clientID, req.AuthParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, req.AuthParams) + if err := addClaims(qv, req.AuthParams); err != nil { + return TokenResponse{}, err + } + + return c.doTokenResp(ctx, req.AuthParams, qv) +} + +// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token. +func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) { + qv := url.Values{} + if appType == ATConfidential { + var err error + qv, err = prepURLVals(ctx, cc, authParams) + if err != nil { + return TokenResponse{}, err + } + } + if err := addClaims(qv, authParams); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.RefreshToken) + qv.Set(clientID, authParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("refresh_token", refreshToken) + addScopeQueryParam(qv, authParams) + + return c.doTokenResp(ctx, authParams, qv) +} + +// FromClientSecret uses a client's secret (aka password) to get a new token. 
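Each `From*` helper above fills a `url.Values` with the grant-specific keys and POSTs it as `application/x-www-form-urlencoded` to the token endpoint. A sketch of what the encoded body of a refresh-token grant looks like (all values are placeholders):

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	qv := url.Values{}
	qv.Set("grant_type", "refresh_token")
	qv.Set("client_id", "client-id") // placeholder
	qv.Set("client_info", "1")
	qv.Set("refresh_token", "<redacted>") // placeholder
	qv.Set("scope", "openid offline_access profile https://graph.microsoft.com/.default")

	// This string becomes the POST body sent to the token endpoint.
	fmt.Println(qv.Encode())
}
```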
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return DeviceCodeResult{}, err + } + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, 
authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + if err := addClaims(qv, authParameters); err != nil { + return TokenResponse{}, err + } + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + if authParams.AuthnScheme != nil { + trParams := authParams.AuthnScheme.TokenRequestParams() + for k, v := range trParams { + qv.Set(k, v) + } + } + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) 
+ return scopes +} + +// addClaims adds client capabilities and claims from AuthParams to the given url.Values +func addClaims(v url.Values, ap authority.AuthParams) error { + claims, err := ap.MergeCapabilitiesAndClaims() + if err == nil && claims != "" { + v.Set("claims", claims) + } + return err +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 000000000..3bec4a67c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 000000000..3107b45c1 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,339 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . 
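`AppendDefaultScopes` above trims blank scopes, drops any reserved scopes the caller passed, and re-appends `openid offline_access profile` so they appear exactly once in every request. A standalone sketch of that merge (the resource scope is a placeholder):

```
package main

import (
	"fmt"
	"strings"
)

var defaultScopes = []string{"openid", "offline_access", "profile"}

// mergeScopes mirrors the AppendDefaultScopes behavior shown above.
func mergeScopes(requested []string) []string {
	reserved := map[string]bool{"openid": true, "offline_access": true, "profile": true}
	out := make([]string, 0, len(requested)+len(defaultScopes))
	for _, s := range requested {
		s = strings.TrimSpace(s)
		if s == "" || reserved[s] {
			continue
		}
		out = append(out, s)
	}
	return append(out, defaultScopes...)
}

func main() {
	scopes := mergeScopes([]string{"User.Read", "openid", " "})
	fmt.Println(strings.Join(scopes, " ")) // User.Read openid offline_access profile
}
```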
+type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// LocalAccountID extracts an account's local account ID from an ID token. +func (i IDToken) LocalAccountID() string { + if i.Oid != "" { + return i.Oid + } + return i.Subject +} + +// jwtDecoder is provided to allow tests to provide their own. +var jwtDecoder = decodeJWT + +// ClientInfo is used to create a Home Account ID for an account. +type ClientInfo struct { + UID string `json:"uid"` + UTID string `json:"utid"` + + AdditionalFields map[string]interface{} +} + +// UnmarshalJSON implements json.Unmarshaler.s +func (c *ClientInfo) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), `"`) + // Client info may be empty in some flows, e.g. certificate exchange. + if len(s) == 0 { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. 
+ type clientInfo2 ClientInfo + + raw, err := jwtDecoder(s) + if err != nil { + return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err) + } + + var c2 clientInfo2 + + err = json.Unmarshal(raw, &c2) + if err != nil { + return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err) + } + + *c = ClientInfo(c2) + return nil +} + +// Scopes represents scopes in a TokenResponse. +type Scopes struct { + Slice []string +} + +// UnmarshalJSON implements json.Unmarshal. +func (s *Scopes) UnmarshalJSON(b []byte) error { + str := strings.Trim(string(b), `"`) + if len(str) == 0 { + return nil + } + sl := strings.Split(str, " ") + s.Slice = sl + return nil +} + +// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow. +type TokenResponse struct { + authority.OAuthResponseBase + + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenType string `json:"token_type"` + + FamilyID string `json:"foci"` + IDToken IDToken `json:"id_token"` + ClientInfo ClientInfo `json:"client_info"` + ExpiresOn internalTime.DurationTime `json:"expires_in"` + ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` + GrantedScopes Scopes `json:"scope"` + DeclinedScopes []string // This is derived + + AdditionalFields map[string]interface{} + + scopesComputed bool +} + +// ComputeScope computes the final scopes based on what was granted by the server and +// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted +// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat +// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3 +func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { + if len(tr.GrantedScopes.Slice) == 0 { + tr.GrantedScopes = Scopes{Slice: authParams.Scopes} + } else { + tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice) + } + tr.scopesComputed = true +} + +// HomeAccountID uniquely identifies the authenticated account, if any. It's "" when the token is an app token. +func (tr *TokenResponse) HomeAccountID() string { + id := tr.IDToken.Subject + if uid := tr.ClientInfo.UID; uid != "" { + utid := tr.ClientInfo.UTID + if utid == "" { + utid = uid + } + id = fmt.Sprintf("%s.%s", uid, utid) + } + return id +} + +// Validate validates the TokenResponse has basic valid values. It must be called +// after ComputeScopes() is called. 
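`IDToken.UnmarshalJSON` and `ClientInfo.UnmarshalJSON` above do not validate signatures; they split the compact JWT on `.` and base64url-decode (unpadded) the relevant segment before unmarshaling the JSON inside. A minimal sketch of that decode step with a hand-built, unsigned token (the claims are made up):

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	header := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"none"}`))
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"preferred_username":"user@example.com","oid":"123"}`))
	token := header + "." + payload + "." // unsigned, illustration only

	parts := strings.Split(token, ".")
	// Same unpadded base64url decode the package's decodeJWT helper performs.
	raw, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}

	var claims map[string]string
	if err := json.Unmarshal(raw, &claims); err != nil {
		panic(err)
	}
	fmt.Println(claims["preferred_username"], claims["oid"]) // user@example.com 123
}
```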
+func (tr *TokenResponse) Validate() error { + if tr.Error != "" { + return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription) + } + + if tr.AccessToken == "" { + return errors.New("response is missing access_token") + } + + if !tr.scopesComputed { + return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called") + } + return nil +} + +func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { + if authParams.AuthorizationType == authority.ATOnBehalfOf { + return authParams.AssertionHash() + } + if authParams.AuthorizationType == authority.ATClientCredentials { + return authParams.AppKey() + } + if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { + return tr.HomeAccountID() + } + return "" +} + +func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string { + declined := []string{} + grantedMap := map[string]bool{} + for _, s := range grantedScopes { + grantedMap[strings.ToLower(s)] = true + } + // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined. + for _, r := range requestedScopes { + if !grantedMap[strings.ToLower(r)] { + declined = append(declined, r) + } + } + return declined +} + +// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object +// JWT has headers and payload base64url encoded without padding +// https://tools.ietf.org/html/rfc7519#section-3 and +// https://tools.ietf.org/html/rfc7515#section-2 +func decodeJWT(data string) ([]byte, error) { + // https://tools.ietf.org/html/rfc7515#appendix-C + return base64.RawURLEncoding.DecodeString(data) +} + +// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage. +type RefreshToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + FamilyID string `json:"family_id,omitempty"` + Secret string `json:"secret,omitempty"` + Realm string `json:"realm,omitempty"` + Target string `json:"target,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewRefreshToken is the constructor for RefreshToken. +func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken { + return RefreshToken{ + HomeAccountID: homeID, + Environment: env, + CredentialType: "RefreshToken", + ClientID: clientID, + FamilyID: familyID, + Secret: refreshToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (rt RefreshToken) Key() string { + var fourth = rt.FamilyID + if fourth == "" { + fourth = rt.ClientID + } + + key := strings.Join( + []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, + shared.CacheKeySeparator, + ) + return strings.ToLower(key) +} + +func (rt RefreshToken) GetSecret() string { + return rt.Secret +} + +// DeviceCodeResult stores the response from the STS device code endpoint. +type DeviceCodeResult struct { + // UserCode is the code the user needs to provide when authentication at the verification URI. + UserCode string + // DeviceCode is the code used in the access token request. + DeviceCode string + // VerificationURL is the the URL where user can authenticate. + VerificationURL string + // ExpiresOn is the expiration time of device code in seconds. 
+ ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 000000000..9d60734f8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,589 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + autoDetectRegion = "TryAutoDetect" + AccessTokenTypeBearer = "Bearer" +) + +// These are various hosts that host AAD Instance discovery endpoints. +const ( + defaultHost = "login.microsoftonline.com" + loginMicrosoft = "login.microsoft.com" + loginWindows = "login.windows.net" + loginSTSWindows = "sts.windows.net" + loginMicrosoftOnline = defaultHost +) + +// jsonCaller is an interface that allows us to mock the JSONCall method. +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +// OAuthResponseBase is the base JSON return message for an OAuth call. 
+// This is embedded in other calls to get the base fields from every response. +type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. +const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +type AuthenticationScheme interface { + // Extra parameters that are added to the request to the /token endpoint. + TokenRequestParams() map[string]string + // Key ID of the public / private key pair used by the encryption algorithm, if any. + // Tokens obtained by authentication schemes that use this are bound to the KeyId, i.e. + // if a different kid is presented, the access token cannot be used. + KeyID() string + // Creates the access token that goes into an Authorization HTTP header. + FormatAccessToken(accessToken string) (string, error) + //Expected to match the token_type parameter returned by ESTS. Used to disambiguate + // between ATs of different types (e.g. Bearer and PoP) when loading from cache etc. 
+ AccessTokenType() string +} + +// default authn scheme realizing AuthenticationScheme for "Bearer" tokens +type BearerAuthenticationScheme struct{} + +var bearerAuthnScheme BearerAuthenticationScheme + +func (ba *BearerAuthenticationScheme) TokenRequestParams() map[string]string { + return nil +} +func (ba *BearerAuthenticationScheme) KeyID() string { + return "" +} +func (ba *BearerAuthenticationScheme) FormatAccessToken(accessToken string) (string, error) { + return accessToken, nil +} +func (ba *BearerAuthenticationScheme) AccessTokenType() string { + return AccessTokenTypeBearer +} + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + // Capabilities the client will include with each token request, for example "CP1". + // Call [NewClientCapabilities] to construct a value for this field. + Capabilities ClientCapabilities + // Claims required for an access token to satisfy a conditional access policy + Claims string + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string + // LoginHint is a username with which to pre-populate account selection during interactive auth + LoginHint string + // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page + DomainHint string + // AuthnScheme is an optional scheme for formatting access tokens + AuthnScheme AuthenticationScheme +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + AuthnScheme: &bearerAuthnScheme, + } +} + +// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given +// ID is empty, the copy is identical to the original. 
This function returns an error in +// several cases: +// - ID isn't specific (for example, it's "common") +// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority) +// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint +// - the resulting authority URL is invalid +func (p AuthParams) WithTenant(ID string) (AuthParams, error) { + switch ID { + case "", p.AuthorityInfo.Tenant: + // keep the default tenant because the caller didn't override it + return p, nil + case "common", "consumers", "organizations": + if p.AuthorityInfo.AuthorityType == AAD { + return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) + } + // else we'll return a better error below + } + if p.AuthorityInfo.AuthorityType != AAD { + return p, errors.New("the authority doesn't support tenants") + } + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) + if err == nil { + info.Region = p.AuthorityInfo.Region + p.AuthorityInfo = info + } + return p, err +} + +// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter. +func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) { + claims := p.Claims + if len(p.Capabilities.asMap) > 0 { + if claims == "" { + // without claims the result is simply the capabilities + return p.Capabilities.asJSON, nil + } + // Otherwise, merge claims and capabilties into a single JSON object. + // We handle the claims challenge as a map because we don't know its structure. + var challenge map[string]any + if err := json.Unmarshal([]byte(claims), &challenge); err != nil { + return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err) + } + if err := merge(p.Capabilities.asMap, challenge); err != nil { + return "", err + } + b, err := json.Marshal(challenge) + if err != nil { + return "", err + } + claims = string(b) + } + return claims, nil +} + +// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value. +func merge(a, b map[string]any) error { + for k, av := range a { + if bv, ok := b[k]; !ok { + // b doesn't contain this key => simply set it to a's value + b[k] = av + } else { + // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't + // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice + // because the challenge will be from AAD, which knows the capabilities format. + if A, ok := av.(map[string]any); ok { + if B, ok := bv.(map[string]any); ok { + return merge(A, B) + } else { + // b[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } else { + // a[k] isn't a map + return errors.New("challenge claims conflict with client capabilities") + } + } + } + return nil +} + +// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims. 
+// [NewClientCapabilities] precomputes these representations because capabilities are static for the +// lifetime of a client and are included with every authentication request i.e., these computations +// always have the same result and would otherwise have to be repeated for every request. +type ClientCapabilities struct { + // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims + asJSON string + // asMap is for merging the capabilities with challenge claims + asMap map[string]any +} + +func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) { + c := ClientCapabilities{} + var err error + if len(capabilities) > 0 { + cpbs := make([]string, len(capabilities)) + for i := 0; i < len(cpbs); i++ { + cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i]) + } + c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ",")) + // note our JSON is valid but we can't stop users breaking it with garbage like "}" + err = json.Unmarshal([]byte(c.asJSON), &c.asMap) + } + return c, err +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string + InstanceDiscoveryDisabled bool +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. +func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { + u, err := url.Parse(strings.ToLower(authority)) + if err != nil || u.Scheme != "https" { + return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + } + + tenant, err := firstPathSegment(u) + if err != nil { + return Info{}, err + } + authorityType := AAD + if tenant == "adfs" { + authorityType = ADFS + } + + // u.Host includes the port, if any, which is required for private cloud deployments + return Info{ + Host: u.Host, + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + InstanceDiscoveryDisabled: instanceDiscoveryDisabled, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. 
+const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint). +// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com). 
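+//
+// An illustrative call, shown here for documentation only (the variable names
+// below are assumptions and this example is not part of the upstream MSAL source):
+//
+//	resp, err := client.AADInstanceDiscovery(ctx, authorityInfo)
+//	if err != nil {
+//		// handle the discovery error
+//	}
+//	_ = resp.TenantDiscoveryEndpoint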
+func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost: + environment = loginMicrosoft + } + + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 000000000..10039773b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 000000000..7d9ec7cd3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. + CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. 
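+//
+// A minimal usage sketch (the endpoint URL and response type below are
+// hypothetical, added for documentation and not part of the upstream source):
+//
+//	var out struct {
+//		Value            string `json:"value"`
+//		AdditionalFields map[string]interface{}
+//	}
+//	err := client.JSONCall(ctx, "https://example.com/endpoint", http.Header{}, url.Values{}, nil, &out)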
+func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. + var marshal = json.Marshal + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + marshal = customJSON.Marshal + unmarshal = customJSON.Unmarshal + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if body != nil { + // Note: In case your wondering why we are not gzip encoding.... + // I'm not sure if these various services support gzip on send. + headers.Add("Content-Type", "application/json; charset=utf-8") + data, err := marshal(body) + if err != nil { + return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(data)) + req.Method = http.MethodPost + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + } + } + return nil +} + +// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when +// sending application/xml . If sending XML via SOAP, use SOAPCall(). +func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error { + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but... + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, "", resp) +} + +// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into. +func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. 
+func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. +func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + } + req = req.WithContext(ctx) + + reply, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("server response error:\n %w", err) + } + defer reply.Body.Close() + + data, err := c.readBody(reply) + if err != nil { + return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) + } + reply.Body = io.NopCloser(bytes.NewBuffer(data)) + + // NOTE: This doesn't happen immediately after the call so that we can get an error message + // from the server and include it in our error. + switch reply.StatusCode { + case 200, 201: + default: + sd := strings.TrimSpace(string(data)) + if sd != "" { + // We probably have the error in the body. + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd), + } + } + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode), + } + } + + return data, nil +} + +// checkResp checks a response object o make sure it is a pointer to a struct. 
+func (c *Client) checkResp(v reflect.Value) error { + if v.Kind() != reflect.Ptr { + return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface()) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface()) + } + return nil +} + +// readBody reads the body out of an *http.Response. It supports gzip encoded responses. +func (c *Client) readBody(resp *http.Response) ([]byte, error) { + var reader io.Reader = resp.Body + switch resp.Header.Get("Content-Encoding") { + case "": + // Do nothing + case "gzip": + reader = gzipDecompress(resp.Body) + default: + return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) + } + return io.ReadAll(reader) +} + +var testID string + +// addStdHeaders adds the standard headers we use on all calls. +func addStdHeaders(headers http.Header) http.Header { + headers.Set("Accept-Encoding", "gzip") + // So that I can have a static id for tests. + if testID != "" { + headers.Set("client-request-id", testID) + headers.Set("Return-Client-Request-Id", "false") + } else { + headers.Set("client-request-id", uuid.New().String()) + headers.Set("Return-Client-Request-Id", "false") + } + headers.Set("x-client-sku", "MSAL.Go") + headers.Set("x-client-os", runtime.GOOS) + headers.Set("x-client-cpu", runtime.GOARCH) + headers.Set("x-client-ver", version.Version) + return headers +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go new file mode 100644 index 000000000..4d3dbfcf0 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 000000000..b628f61ac --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. 
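+//
+// Constants such as RefreshToken and ClientCredential are typically sent as the
+// grant_type form parameter of a token request. An illustrative sketch (not part
+// of the upstream source; the form values shown are an assumption for
+// documentation purposes):
+//
+//	qv := url.Values{}
+//	qv.Set("grant_type", grant.RefreshToken)
+//	qv.Set("refresh_token", refreshToken)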
+package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 000000000..1f9c543fa --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. +func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 000000000..a2bb6278a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 000000000..649727002 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + 
Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken `xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy 
`xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + +type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type 
BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 000000000..7d0725565 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` 
+ Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + 
X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 000000000..6fe5efa8a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 000000000..8fad5efb5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 000000000..e3d19886e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 000000000..47cd4c692 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 000000000..0ade41179 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like many other *Manager code I found was broken because +// they didn't have mutex protection. 
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for AuthorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server. 
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go new file mode 100644 index 000000000..4561d72db --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package options + +import ( + "errors" + "fmt" +) + +// CallOption implements an optional argument to a method call. See +// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe +// for an explanation of the usage pattern. +type CallOption interface { + Do(any) error + callOption() +} + +// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and +// callOptions must be a list of objects that implement CallOption. +func ApplyOptions[O, C any](options O, callOptions []C) error { + for _, o := range callOptions { + if t, ok := any(o).(CallOption); !ok { + return fmt.Errorf("unexpected option type %T", o) + } else if err := t.Do(options); err != nil { + return err + } + } + return nil +} + +// NewCallOption returns a new CallOption whose Do() method calls function "f". +func NewCallOption(f func(any) error) CallOption { + if f == nil { + // This isn't a practical concern because only an MSAL maintainer can get + // us here, by implementing a do-nothing option. But if someone does that, + // the below ensures the method invoked with the option returns an error. 
+ return callOption(func(any) error { + return errors.New("invalid option: missing implementation") + }) + } + return callOption(f) +} + +// callOption is an adapter for a function to a CallOption +type callOption func(any) error + +func (c callOption) Do(a any) error { + return c(a) +} + +func (callOption) callOption() {} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 000000000..d8ab71356 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + key := strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) + return strings.ToLower(key) +} + +// IsZero checks the zero value of account. +func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 000000000..eb16b405c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. 
+const Version = "1.2.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go new file mode 100644 index 000000000..392e5e43f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -0,0 +1,756 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package public provides a client for authentication of "public" applications. A "public" +application is defined as an app that runs on client devices (android, ios, windows, linux, ...). +These devices are "untrusted" and access resources via web APIs that must authenticate. +*/ +package public + +/* +Design note: + +public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be includee in the package documentation. + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "net/url" + "reflect" + "strconv" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" + "github.com/google/uuid" + "github.com/pkg/browser" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type AuthenticationScheme = authority.AuthenticationScheme + +type Account = shared.Account + +var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") + +// clientOptions configures the Client's behavior. +type clientOptions struct { + accessor cache.ExportReplace + authority string + capabilities []string + disableInstanceDiscovery bool + httpClient ops.HTTPClient +} + +func (p *clientOptions) validate() error { + u, err := url.Parse(p.authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *clientOptions) + +// WithAuthority allows for a custom authority to be set. This must be a valid https url. +func WithAuthority(authority string) Option { + return func(o *clientOptions) { + o.authority = authority + } +} + +// WithCache provides an accessor that will read and write authentication data to an externally managed cache. 
+func WithCache(accessor cache.ExportReplace) Option { + return func(o *clientOptions) { + o.accessor = accessor + } +} + +// WithClientCapabilities allows configuring one or more client capabilities such as "CP1" +func WithClientCapabilities(capabilities []string) Option { + return func(o *clientOptions) { + // there's no danger of sharing the slice's underlying memory with the application because + // this slice is simply passed to base.WithClientCapabilities, which copies its data + o.capabilities = capabilities + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *clientOptions) { + o.httpClient = httpClient + } +} + +// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios) +func WithInstanceDiscovery(enabled bool) Option { + return func(o *clientOptions) { + o.disableInstanceDiscovery = !enabled + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := clientOptions{ + authority: base.AuthorityPublicCloud, + httpClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// authCodeURLOptions contains options for AuthCodeURL +type authCodeURLOptions struct { + claims, loginHint, tenantID, domainHint string +} + +// AuthCodeURLOption is implemented by options for AuthCodeURL +type AuthCodeURLOption interface { + authCodeURLOption() +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +// +// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID] +func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) { + o := authCodeURLOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return "", err + } + ap, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return "", err + } + ap.Claims = o.claims + ap.LoginHint = o.loginHint + ap.DomainHint = o.domainHint + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap) +} + +// WithClaims sets additional claims to request for the token, such as those required by conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +// This option is valid for any token acquisition method. 
+func WithClaims(claims string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.claims = claims + case *acquireTokenByDeviceCodeOptions: + t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims + case *acquireTokenSilentOptions: + t.claims = claims + case *authCodeURLOptions: + t.claims = claims + case *interactiveAuthOptions: + t.claims = claims + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireInteractiveOption + AcquireByUsernamePasswordOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireInteractiveOption + AcquireByUsernamePasswordOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *interactiveAuthOptions: + t.authnScheme = authnScheme + case *acquireTokenByUsernamePasswordOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. +// This option is valid for any token acquisition method. +func WithTenantID(tenantID string) interface { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + AcquireByDeviceCodeOption + AcquireByUsernamePasswordOption + AcquireInteractiveOption + AcquireSilentOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.tenantID = tenantID + case *acquireTokenByDeviceCodeOptions: + t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID + case *acquireTokenSilentOptions: + t.tenantID = tenantID + case *authCodeURLOptions: + t.tenantID = tenantID + case *interactiveAuthOptions: + t.tenantID = tenantID + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type acquireTokenSilentOptions struct { + account Account + claims, tenantID string + authnScheme AuthenticationScheme +} + +// AcquireSilentOption is implemented by options for AcquireTokenSilent +type AcquireSilentOption interface { + acquireSilentOption() +} + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. 
+func WithSilentAccount(account Account) interface { + AcquireSilentOption + options.CallOption +} { + return struct { + AcquireSilentOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.account = account + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. +// +// Options: [WithClaims], [WithSilentAccount], [WithTenantID] +func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) { + o := acquireTokenSilentOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // an account is required to find user tokens in the cache + if reflect.ValueOf(o.account).IsZero() { + return AuthResult{}, errNoAccount + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: o.account, + Claims: o.claims, + RequestType: accesstokens.ATPublic, + IsAppCache: false, + TenantID: o.tenantID, + AuthnScheme: o.authnScheme, + } + + return pca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string + authnScheme AuthenticationScheme +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + +// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. +// NOTE: this flow is NOT recommended. +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) { + o := acquireTokenByUsernamePasswordOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATUsernamePassword + authParams.Claims = o.claims + authParams.Username = username + authParams.Password = password + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } + + token, err := pca.base.Token.UsernamePassword(ctx, authParams) + if err != nil { + return AuthResult{}, err + } + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type DeviceCodeResult = accesstokens.DeviceCodeResult + +// DeviceCode provides the results of the device code flows first stage (containing the code) +// that must be entered on the second device and provides a method to retrieve the AuthenticationResult +// once that code has been entered and verified. +type DeviceCode struct { + // Result holds the information about the device code (such as the code). + Result DeviceCodeResult + + authParams authority.AuthParams + client Client + dc oauth.DeviceCode +} + +// AuthenticationResult retreives the AuthenticationResult once the user enters the code +// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context +// is cancelled or the token expires. 
+func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) { + token, err := d.dc.Token(ctx) + if err != nil { + return AuthResult{}, err + } + return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) +} + +// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode +type acquireTokenByDeviceCodeOptions struct { + claims, tenantID string +} + +// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode +type AcquireByDeviceCodeOption interface { + acquireByDeviceCodeOptions() +} + +// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token. +// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in. +// +// Options: [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) { + o := acquireTokenByDeviceCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return DeviceCode{}, err + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return DeviceCode{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATDeviceCode + authParams.Claims = o.claims + + dc, err := pca.base.Token.DeviceCode(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil +} + +// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type acquireTokenByAuthCodeOptions struct { + challenge, claims, tenantID string +} + +// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode +type AcquireByAuthCodeOption interface { + acquireByAuthCodeOption() +} + +// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) interface { + AcquireByAuthCodeOption + options.CallOption +} { + return struct { + AcquireByAuthCodeOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenByAuthCodeOptions: + t.challenge = challenge + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +// +// Options: [WithChallenge], [WithClaims], [WithTenantID] +func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) { + o := acquireTokenByAuthCodeOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: o.challenge, + Claims: o.claims, + AppType: accesstokens.ATPublic, + RedirectURI: redirectURI, + TenantID: o.tenantID, + } + + return pca.base.AcquireTokenByAuthCode(ctx, params) +} + +// Accounts gets all the accounts in the token cache. +// If there are no accounts in the cache the returned slice is empty. 
+func (pca Client) Accounts(ctx context.Context) ([]Account, error) { + return pca.base.AllAccounts(ctx) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (pca Client) RemoveAccount(ctx context.Context, account Account) error { + return pca.base.RemoveAccount(ctx, account) +} + +// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. +type interactiveAuthOptions struct { + claims, domainHint, loginHint, redirectURI, tenantID string + openURL func(url string) error + authnScheme AuthenticationScheme +} + +// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive +type AcquireInteractiveOption interface { + acquireInteractiveOption() +} + +// WithLoginHint pre-populates the login prompt with a username. +func WithLoginHint(username string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.loginHint = username + case *interactiveAuthOptions: + t.loginHint = username + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url. +func WithDomainHint(domain string) interface { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + AuthCodeURLOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *authCodeURLOptions: + t.domainHint = domain + case *interactiveAuthOptions: + t.domainHint = domain + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithRedirectURI sets a port for the local server used in interactive authentication, for +// example http://localhost:port. All URI components other than the port are ignored. +func WithRedirectURI(redirectURI string) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.redirectURI = redirectURI + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// WithOpenURL allows you to provide a function to open the browser to complete the interactive login, instead of launching the system default browser. +func WithOpenURL(openURL func(url string) error) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.openURL = openURL + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + +// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. 
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication +// +// Options: [WithDomainHint], [WithLoginHint], [WithOpenURL], [WithRedirectURI], [WithTenantID] +func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { + o := interactiveAuthOptions{} + if err := options.ApplyOptions(&o, opts); err != nil { + return AuthResult{}, err + } + // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding. + // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636 + cv, challenge, err := codeVerifier() + if err != nil { + return AuthResult{}, err + } + var redirectURL *url.URL + if o.redirectURI != "" { + redirectURL, err = url.Parse(o.redirectURI) + if err != nil { + return AuthResult{}, err + } + } + if o.openURL == nil { + o.openURL = browser.OpenURL + } + authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) + if err != nil { + return AuthResult{}, err + } + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.Claims = o.claims + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.LoginHint = o.loginHint + authParams.DomainHint = o.domainHint + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } + res, err := pca.browserLogin(ctx, redirectURL, authParams, o.openURL) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. 
+func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin calls openURL and waits for a user to log in +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams, openURL func(string) error) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := openURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. +func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS similarity index 74% rename from vendor/golang.org/x/oauth2/AUTHORS rename to vendor/github.com/ProtonMail/go-crypto/AUTHORS index 15167cd74..2b00ddba0 100644 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -1,3 +1,3 @@ # This source code refers to The Go Authors for copyright purposes. # The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS similarity index 70% rename from vendor/golang.org/x/oauth2/CONTRIBUTORS rename to vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS index 1c4577e96..1fbd3e976 100644 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -1,3 +1,3 @@ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. +# visible at https://tip.golang.org/CONTRIBUTORS. 
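The `apps/public` package vendored above documents its token flows only in comments (its package doc even carries a TODO about missing example code). For orientation, here is a minimal, hypothetical sketch of the cache-first pattern those methods implement: try `AcquireTokenSilent` with a cached account, then fall back to `AcquireTokenInteractive`. It is illustrative only, not part of the vendored files or of this patch; the client ID, authority, and scopes are placeholder values.

```go
// Illustrative sketch only — not part of the vendored MSAL sources.
// Client ID, authority, and scopes below are placeholders.
package main

import (
	"context"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)

func acquireToken(ctx context.Context) (public.AuthResult, error) {
	// New validates that the authority is an https URL
	// (see clientOptions.validate in the vendored public.go above).
	client, err := public.New("00000000-0000-0000-0000-000000000000",
		public.WithAuthority("https://login.microsoftonline.com/common"))
	if err != nil {
		return public.AuthResult{}, err
	}

	scopes := []string{"user.read"}

	// Try the token cache first. AcquireTokenSilent requires an account from a
	// previous authentication; without one it returns an error.
	accounts, err := client.Accounts(ctx)
	if err == nil && len(accounts) > 0 {
		result, err := client.AcquireTokenSilent(ctx, scopes,
			public.WithSilentAccount(accounts[0]))
		if err == nil {
			return result, nil
		}
	}

	// Fall back to the interactive flow, which opens the system browser and
	// runs a local redirect server with a PKCE code challenge
	// (see browserLogin and codeVerifier above).
	return client.AcquireTokenInteractive(ctx, scopes)
}

func main() {
	if _, err := acquireToken(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```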
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE rename to vendor/github.com/ProtonMail/go-crypto/LICENSE diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go new file mode 100644 index 000000000..c85e6befe --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -0,0 +1,381 @@ +package bitcurves + +// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2011 ThePiachu. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitelliptic implements several Koblitz elliptic curves over prime +// fields. + +// This package operates, internally, on Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole +// calculation can be performed within the transform (as in ScalarMult and +// ScalarBaseMult). But even for Add and Double, it's faster to apply and +// reverse the transform than to operate in affine coordinates. + +import ( + "crypto/elliptic" + "io" + "math/big" + "sync" +) + +// A BitCurve represents a Koblitz Curve with a=0. 
+// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html +type BitCurve struct { + Name string + P *big.Int // the order of the underlying field + N *big.Int // the order of the base point + B *big.Int // the constant of the BitCurve equation + Gx, Gy *big.Int // (x,y) of the base point + BitSize int // the size of the underlying field +} + +// Params returns the parameters of the given BitCurve (see BitCurve struct) +func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) { + cp = new(elliptic.CurveParams) + cp.Name = bitCurve.Name + cp.P = bitCurve.P + cp.N = bitCurve.N + cp.Gx = bitCurve.Gx + cp.Gy = bitCurve.Gy + cp.BitSize = bitCurve.BitSize + return cp +} + +// IsOnCurve returns true if the given (x,y) lies on the BitCurve. +func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { + // y² = x³ + b + y2 := new(big.Int).Mul(y, y) //y² + y2.Mod(y2, bitCurve.P) //y²%P + + x3 := new(big.Int).Mul(x, x) //x² + x3.Mul(x3, x) //x³ + + x3.Add(x3, bitCurve.B) //x³+B + x3.Mod(x3, bitCurve.P) //(x³+B)%P + + return x3.Cmp(y2) == 0 +} + +// affineFromJacobian reverses the Jacobian transform. See the comment at the +// top of the file. +func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { + if z.Cmp(big.NewInt(0)) == 0 { + panic("bitcurve: Can't convert to affine with Jacobian Z = 0") + } + // x = YZ^2 mod P + zinv := new(big.Int).ModInverse(z, bitCurve.P) + zinvsq := new(big.Int).Mul(zinv, zinv) + + xOut = new(big.Int).Mul(x, zinvsq) + xOut.Mod(xOut, bitCurve.P) + // y = YZ^3 mod P + zinvsq.Mul(zinvsq, zinv) + yOut = new(big.Int).Mul(y, zinvsq) + yOut.Mod(yOut, bitCurve.P) + return xOut, yOut +} + +// Add returns the sum of (x1,y1) and (x2,y2) +func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + z := new(big.Int).SetInt64(1) + x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z) + return bitCurve.affineFromJacobian(x, y, z) +} + +// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and +// (x2, y2, z2) and returns their sum, also in Jacobian form. 
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + z1z1 := new(big.Int).Mul(z1, z1) + z1z1.Mod(z1z1, bitCurve.P) + z2z2 := new(big.Int).Mul(z2, z2) + z2z2.Mod(z2z2, bitCurve.P) + + u1 := new(big.Int).Mul(x1, z2z2) + u1.Mod(u1, bitCurve.P) + u2 := new(big.Int).Mul(x2, z1z1) + u2.Mod(u2, bitCurve.P) + h := new(big.Int).Sub(u2, u1) + if h.Sign() == -1 { + h.Add(h, bitCurve.P) + } + i := new(big.Int).Lsh(h, 1) + i.Mul(i, i) + j := new(big.Int).Mul(h, i) + + s1 := new(big.Int).Mul(y1, z2) + s1.Mul(s1, z2z2) + s1.Mod(s1, bitCurve.P) + s2 := new(big.Int).Mul(y2, z1) + s2.Mul(s2, z1z1) + s2.Mod(s2, bitCurve.P) + r := new(big.Int).Sub(s2, s1) + if r.Sign() == -1 { + r.Add(r, bitCurve.P) + } + r.Lsh(r, 1) + v := new(big.Int).Mul(u1, i) + + x3 := new(big.Int).Set(r) + x3.Mul(x3, x3) + x3.Sub(x3, j) + x3.Sub(x3, v) + x3.Sub(x3, v) + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Set(r) + v.Sub(v, x3) + y3.Mul(y3, v) + s1.Mul(s1, j) + s1.Lsh(s1, 1) + y3.Sub(y3, s1) + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Add(z1, z2) + z3.Mul(z3, z3) + z3.Sub(z3, z1z1) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Sub(z3, z2z2) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Mul(z3, h) + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// Double returns 2*(x,y) +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + z1 := new(big.Int).SetInt64(1) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) +} + +// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and +// returns its double, also in Jacobian form. +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + + a := new(big.Int).Mul(x, x) //X1² + b := new(big.Int).Mul(y, y) //Y1² + c := new(big.Int).Mul(b, b) //B² + + d := new(big.Int).Add(x, b) //X1+B + d.Mul(d, d) //(X1+B)² + d.Sub(d, a) //(X1+B)²-A + d.Sub(d, c) //(X1+B)²-A-C + d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) + + e := new(big.Int).Mul(big.NewInt(3), a) //3*A + f := new(big.Int).Mul(e, e) //E² + + x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D + x3.Sub(f, x3) //F-2*D + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Sub(d, x3) //D-X3 + y3.Mul(e, y3) //E*(D-X3) + y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Mul(y, z) //Y1*Z1 + z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// TODO: double check if it is okay +// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { + // We have a slight problem in that the identity of the group (the + // point at infinity) cannot be represented in (x, y) form on a finite + // machine. Thus the standard add/double algorithm has to be tweaked + // slightly: our initial state is not the identity, but x, and we + // ignore the first true bit in |k|. If we don't find any true bits in + // |k|, then we return nil, nil, because we cannot return the identity + // element. 
+ + Bz := new(big.Int).SetInt64(1) + x := Bx + y := By + z := Bz + + seenFirstTrue := false + for _, byte := range k { + for bitNum := 0; bitNum < 8; bitNum++ { + if seenFirstTrue { + x, y, z = bitCurve.doubleJacobian(x, y, z) + } + if byte&0x80 == 0x80 { + if !seenFirstTrue { + seenFirstTrue = true + } else { + x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) + } + } + byte <<= 1 + } + } + + if !seenFirstTrue { + return nil, nil + } + + return bitCurve.affineFromJacobian(x, y, z) +} + +// ScalarBaseMult returns k*G, where G is the base point of the group and k is +// an integer in big-endian form. +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) +} + +var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} + +// TODO: double check if it is okay +// GenerateKey returns a public/private key pair. The private key is generated +// using the given reader, which must return random data. +func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { + byteLen := (bitCurve.BitSize + 7) >> 3 + priv = make([]byte, byteLen) + + for x == nil { + _, err = io.ReadFull(rand, priv) + if err != nil { + return + } + // We have to mask off any excess bits in the case that the size of the + // underlying field is not a whole number of bytes. + priv[0] &= mask[bitCurve.BitSize%8] + // This is because, in tests, rand will return all zeros and we don't + // want to get the point at infinity and loop forever. + priv[1] ^= 0x42 + x, y = bitCurve.ScalarBaseMult(priv) + } + return +} + +// Marshal converts a point into the form specified in section 4.3.6 of ANSI +// X9.62. +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. 
+func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) +func S224() *BitCurve { + initonce.Do(initAll) + return secp224k1 +} + +// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1) +func S256() *BitCurve { + initonce.Do(initAll) + return secp256k1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go new file mode 100644 index 000000000..cb6676de2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. +// Implementation of rcurves is from github.com/ebfe/brainpool +// Note that these curves are implemented with naive, non-constant time operations +// and are likely not suitable for environments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ = 
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 000000000..7e291d6aa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + 
+var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 000000000..3ae91d594 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}- bit key length. +func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. Panics on zero nonceSize and +// exceedingly long tags. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters. For all other cases, prefer NewEAX. 
+func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...) + return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) 
+ return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 000000000..ddb53d079 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 000000000..4eb19f28d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
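
Seen end to end, the EAX code above is CTR encryption combined with three tweaked OMAC passes, exposed through the standard cipher.AEAD interface. A minimal usage sketch, assuming the vendored import path; the key, nonce, and message values are illustrative only:

```go
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/eax"
)

func main() {
	key := make([]byte, 16) // AES-128; 24- or 32-byte keys work as well
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := eax.NewEAX(block) // 16-byte nonce and 16-byte tag by default
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	header := []byte("associated data")
	ct := aead.Seal(nil, nonce, []byte("attack at dawn"), header)
	pt, err := aead.Open(nil, nonce, ct, header)
	fmt.Println(string(pt), err) // expect: attack at dawn <nil>
}
```
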
+ +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + {"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 000000000..affb74a76 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)) + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset+i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 000000000..5022285b4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,318 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "math/bits" + + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes. 
+func NewOCB(block cipher.Block) (cipher.AEAD, error) { + return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewOCBWithNonceAndTagSize returns an OCB instance with the given block +// cipher, nonce length, and tag length. Panics on zero nonceSize and +// exceedingly long tag size. +// +// It is recommended to use at least 12 bytes as tag length. +func NewOCBWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if block.BlockSize() != 16 { + return nil, ocbError("Block cipher must have 128-bit blocks") + } + if nonceSize < 1 { + return nil, ocbError("Incorrect nonce length") + } + if nonceSize >= block.BlockSize() { + return nil, ocbError("Nonce length exceeds blocksize - 1") + } + if tagSize > block.BlockSize() { + return nil, ocbError("Custom tag length exceeds blocksize") + } + return &ocb{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), + reusableKtop: reusableKtop{ + noncePrefix: nil, + Ktop: nil, + }, + }, nil +} + +func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > o.nonceSize { + panic("crypto/ocb: Incorrect nonce length given to OCB") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) + return ret +} + +func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > o.nonceSize { + panic("Nonce too long for this instance") + } + if len(ciphertext) < o.tagSize { + return nil, ocbError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - o.tagSize + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] + return ret, nil + } + for i := range out { + out[i] = 0 + } + return nil, ocbError("Tag authentication failed") +} + +// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +// function. It returns the resulting plain/ciphertext with the tag appended. +func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { + // + // Consider X as a sequence of 128-bit blocks + // + // Note: For encryption (resp. decryption), X is the plaintext (resp., the + // ciphertext without the tag). + blockSize := o.block.BlockSize() + + // + // Nonce-dependent and per-encryption variables + // + // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop + // is already computed. + truncatedNonce := make([]byte, len(nonce)) + copy(truncatedNonce, nonce) + truncatedNonce[len(truncatedNonce)-1] &= 192 + var Ktop []byte + if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { + Ktop = o.reusableKtop.Ktop + } else { + // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N + paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) + paddedNonce = append(paddedNonce, truncatedNonce...) + paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) + // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 000000000..0efaf344f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 000000000..330309ff5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared across tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 000000000..14a3c336f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,25 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string +}{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 000000000..3c6251d1c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, ErrWrapPlaintext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = IV, an initial value (see 2.2.3) + for ii := 0; ii < 8; ii++ { + block[ii] = 0xA6 + } + + // - For i = 1 to n + // - Set R[i] = P[i] + intermediate := make([]byte, len(plainText)) + copy(intermediate, plainText) + + // 2) Calculate intermediate values. + for ii := 0; ii < 6; ii++ { + for jj := 0; jj < nblocks; jj++ { + // - B = AES(K, A | R[i]) + copy(block[8:], intermediate[jj*8:jj*8+8]) + c.Encrypt(block[:], block[:]) + + // - A = MSB(64, B) ^ t where t = (n*j)+1 + t := uint64(ii*nblocks + jj + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + // - R[i] = LSB(64, B) + copy(intermediate[jj*8:jj*8+8], block[8:]) + } + } + + // 3) Output results. + // - Set C[0] = A + // - For i = 1 to n + // - C[i] = R[i] + return append(block[:8], intermediate...), nil +} + +// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm. +func Unwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, ErrUnwrapCiphertext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = C[0] + copy(block[:8], cipherText[:8]) + + // - For i = 1 to n + // - Set R[i] = C[i] + intermediate := make([]byte, len(cipherText)-8) + copy(intermediate, cipherText[8:]) + + // 2) Compute intermediate values. + for jj := 5; jj >= 0; jj-- { + for ii := nblocks - 1; ii >= 0; ii-- { + // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1 + // - A = MSB(64, B) + t := uint64(jj*nblocks + ii + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + copy(block[8:], intermediate[ii*8:ii*8+8]) + c.Decrypt(block[:], block[:]) + + // - R[i] = LSB(B, 64) + copy(intermediate[ii*8:ii*8+8], block[8:]) + } + } + + // 3) Output results. + // - If A is an appropriate initial value (see 2.2.3), + for ii := 0; ii < 8; ii++ { + if block[ii] != 0xA6 { + return nil, ErrUnwrapFailed + } + } + + // - For i = 1 to n + // - P[i] = R[i] + return intermediate, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go similarity index 66% rename from vendor/golang.org/x/crypto/openpgp/armor/armor.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index 8907183ec..e0a677f28 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -4,20 +4,15 @@ // Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is // very similar to PEM except that it has an additional CRC checksum. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. 
-package armor // import "golang.org/x/crypto/openpgp/armor" +package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor" import ( "bufio" "bytes" "encoding/base64" - "golang.org/x/crypto/openpgp/errors" "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // A Block represents an OpenPGP armored structure. @@ -28,7 +23,7 @@ import ( // Headers // // base64-encoded Bytes -// '=' base64 encoded checksum +// '=' base64 encoded checksum (optional) not checked anymore // -----END Type----- // // where Headers is a possibly empty sequence of Key: Value lines. @@ -45,36 +40,15 @@ type Block struct { var ArmorCorrupt error = errors.StructuralError("armor invalid") -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - var armorStart = []byte("-----BEGIN ") var armorEnd = []byte("-----END ") var armorEndOfLine = []byte("-----") -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. +// lineReader wraps a line based reader. It watches for the end of an armor block type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool + in *bufio.Reader + buf []byte + eof bool } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -103,26 +77,9 @@ func (l *lineReader) Read(p []byte) (n int, err error) { if len(line) == 5 && line[0] == '=' { // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } + // Don't check the checksum l.eof = true - l.crcSet = true return 0, io.EOF } @@ -143,23 +100,14 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. +// openpgpReader passes Read calls to the underlying base64 decoder. 
type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 + lReader *lineReader + b64Reader io.Reader } func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != r.currentCRC&crc24Mask { - return 0, ArmorCorrupt - } - return } @@ -214,16 +162,19 @@ TryNextBlock: break } - i := bytes.Index(line, []byte(": ")) + i := bytes.Index(line, []byte(":")) if i == -1 { goto TryNextBlock } lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) + var value string + if len(line) > i+2 { + value = string(line[i+2:]) + } + p.Header[lastKey] = value } p.lReader.in = r - p.oReader.currentCRC = crc24Init p.oReader.lReader = &p.lReader p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) p.Body = &p.oReader diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go similarity index 61% rename from vendor/golang.org/x/crypto/openpgp/armor/encode.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 5b6e16c19..112f98b83 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -14,6 +14,23 @@ var blockEnd = []byte("\n=") var newline = []byte("\n") var armorEndOfLineOut = []byte("-----\n") +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + // writeSlices writes its arguments to the given Writer. func writeSlices(out io.Writer, slices ...[]byte) (err error) { for _, s := range slices { @@ -99,15 +116,18 @@ func (l *lineBreaker) Close() (err error) { // // encoding -> base64 encoder -> lineBreaker -> out type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + crcEnabled bool + blockType []byte } func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) + if e.crcEnabled { + e.crc = crc24(e.crc, data) + } return e.b64.Write(data) } @@ -118,20 +138,21 @@ func (e *encoding) Close() (err error) { } e.breaker.Close() - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) + if e.crcEnabled { + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) + } + return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine) } -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. 
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { +func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { bType := []byte(blockType) err = writeSlices(out, armorStart, bType, armorEndOfLineOut) if err != nil { @@ -151,11 +172,27 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr } e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, + out: out, + breaker: newLineBreaker(out, 64), + blockType: bType, + crc: crc24Init, + crcEnabled: checksum, } e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) return e, nil } + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, true) +} + +// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in +// OpenPGP armor and provides the option to include a checksum. +// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated, +// unless interoperability with implementations that require the CRC24 footer +// to be present is a concern. +func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { + return encode(out, blockType, headers, doChecksum) +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go similarity index 71% rename from vendor/golang.org/x/crypto/openpgp/canonical_text.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go index e601e389f..5b40e1375 100644 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -4,7 +4,10 @@ package openpgp -import "hash" +import ( + "hash" + "io" +) // NewCanonicalTextHash reformats text written to it into the canonical // form and then applies the hash h. See RFC 4880, section 5.2.1. @@ -19,28 +22,37 @@ type canonicalTextHash struct { var newline = []byte{'\r', '\n'} -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { start := 0 - for i, c := range buf { - switch cth.s { + switch *s { case 0: if c == '\r' { - cth.s = 1 + *s = 1 } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) + if _, err := cw.Write(buf[start:i]); err != nil { + return 0, err + } + if _, err := cw.Write(newline); err != nil { + return 0, err + } start = i + 1 } case 1: - cth.s = 0 + *s = 0 } } - cth.h.Write(buf[start:]) + if _, err := cw.Write(buf[start:]); err != nil { + return 0, err + } return len(buf), nil } +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + func (cth *canonicalTextHash) Sum(in []byte) []byte { return cth.h.Sum(in) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 000000000..85a06b17c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,287 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
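Stepping back to the armor changes above: EncodeWithChecksumOption lets callers omit the CRC24 footer, and decoding no longer verifies it. A rough usage sketch (illustrative only; armor.Decode and Block.Body belong to the same package but sit outside the hunks shown in this patch):

```
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Write an armored block without the (now optional) CRC24 footer.
	w, err := armor.EncodeWithChecksumOption(&buf, "PGP MESSAGE", nil, false)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decoding accepts blocks with or without the checksum line.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello
}
```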
+ +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. +package ecdh + +import ( + "bytes" + "errors" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519" +) + +const ( + KDFVersion1 = 1 + KDFVersionForwarding = 255 +) + +type KDF struct { + Version int // Defaults to v1; 255 for forwarding + Hash algorithm.Hash + Cipher algorithm.Cipher + ReplacementFingerprint []byte // (forwarding only) fingerprint to use instead of recipient's (20 octets) +} + +func (kdf *KDF) Serialize(w io.Writer) (err error) { + switch kdf.Version { + case 0, KDFVersion1: // Default to v1 if unspecified + return kdf.serializeForHash(w) + case KDFVersionForwarding: + // Length || Version || Hash || Cipher || Replacement Fingerprint + length := byte(3 + len(kdf.ReplacementFingerprint)) + if _, err := w.Write([]byte{length, KDFVersionForwarding, kdf.Hash.Id(), kdf.Cipher.Id()}); err != nil { + return err + } + if _, err := w.Write(kdf.ReplacementFingerprint); err != nil { + return err + } + + return nil + default: + return errors.New("ecdh: invalid KDF version") + } +} + +func (kdf *KDF) serializeForHash(w io.Writer) (err error) { + // Length || Version || Hash || Cipher + if _, err := w.Write([]byte{3, KDFVersion1, kdf.Hash.Id(), kdf.Cipher.Id()}); err != nil { + return err + } + return nil +} + +type PublicKey struct { + curve ecc.ECDHCurve + Point []byte + KDF +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.ECDHCurve, kdf KDF) *PublicKey { + return &PublicKey{ + curve: curve, + KDF: kdf, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDHCurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.Point) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.Point = pk.curve.UnmarshalBytePoint(p) + if pk.Point == nil { + return errors.New("ecdh: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("ecdh: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.KDF = kdf + priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand) + return +} + +func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + if len(msg) > 40 { + return nil, nil, errors.New("ecdh: message too long") + } + // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, + // AES-192, and AES-256, respectively, to provide the same number of + // octets, 40 total, as an input to the key wrapping method. + padding := make([]byte, 40-len(msg)) + for i := range padding { + padding[i] = byte(40 - len(msg)) + } + m := append(msg, padding...) 
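The 21/13/5 padding figures cited in the comment above work out from the wrapped message layout, algorithm ID (1 byte) || session key || 2-byte checksum, padded to 40 octets. A small arithmetic sketch (an aside for illustration, not part of the vendored file):

```
package main

import "fmt"

func main() {
	// msg = cipher algorithm ID (1 byte) || session key || 2-byte checksum,
	// padded up to 40 octets before key wrapping.
	for _, keySize := range []int{16, 24, 32} { // AES-128, AES-192, AES-256
		msgLen := 1 + keySize + 2
		fmt.Printf("AES-%d: %d bytes of padding\n", keySize*8, 40-msgLen)
	}
	// Output:
	// AES-128: 21 bytes of padding
	// AES-192: 13 bytes of padding
	// AES-256: 5 bytes of padding
}
```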
+ + ephemeral, zb, err := pub.curve.Encaps(random, pub.Point) + if err != nil { + return nil, nil, err + } + + vsG = pub.curve.MarshalBytePoint(ephemeral) + + z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, m); err != nil { + return nil, nil, err + } + + return vsG, c, nil + +} + +func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) { + var m []byte + zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D) + + // Try buildKey three times to workaround an old bug, see comments in buildKey. + for i := 0; i < 3; i++ { + var z []byte + // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );" + z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]" + m, err = keywrap.Unwrap(z, c) + if err == nil { + break + } + } + + // Only return an error after we've tried all (required) variants of buildKey. + if err != nil { + return nil, err + } + + // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding" + // The last byte should be the length of the padding, as per PKCS5; strip it off. + return m[:len(m)-int(m[len(m)-1])], nil +} + +func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { + // Param = curve_OID_len || curve_OID || public_key_alg_ID + // || KDF_params for AESKeyWrap + // || "Anonymous Sender " || recipient_fingerprint; + param := new(bytes.Buffer) + if _, err := param.Write(curveOID); err != nil { + return nil, err + } + algo := []byte{18} + if _, err := param.Write(algo); err != nil { + return nil, err + } + + if err := pub.KDF.serializeForHash(param); err != nil { + return nil, err + } + + if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { + return nil, err + } + + if pub.KDF.ReplacementFingerprint != nil { + fingerprint = pub.KDF.ReplacementFingerprint + } + + if _, err := param.Write(fingerprint); err != nil { + return nil, err + } + + // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); + h := pub.KDF.Hash.New() + if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { + return nil, err + } + zbLen := len(zb) + i := 0 + j := zbLen - 1 + if stripLeading { + // Work around old go crypto bug where the leading zeros are missing. + for i < zbLen && zb[i] == 0 { + i++ + } + } + if stripTrailing { + // Work around old OpenPGP.js bug where insignificant trailing zeros in + // this little-endian number are missing. + // (See https://github.com/openpgpjs/openpgpjs/pull/853.) + for j >= 0 && zb[j] == 0 { + j-- + } + } + if _, err := h.Write(zb[i : j+1]); err != nil { + return nil, err + } + if _, err := h.Write(param.Bytes()); err != nil { + return nil, err + } + mb := h.Sum(nil) + + return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. 
+ +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDH(priv.Point, priv.D) +} + +func DeriveProxyParam(recipientKey, forwardeeKey *PrivateKey) (proxyParam []byte, err error) { + if recipientKey.GetCurve().GetCurveName() != "curve25519" { + return nil, pgperrors.InvalidArgumentError("recipient subkey is not curve25519") + } + + if forwardeeKey.GetCurve().GetCurveName() != "curve25519" { + return nil, pgperrors.InvalidArgumentError("forwardee subkey is not curve25519") + } + + c := ecc.NewCurve25519() + + // Clamp and reverse two secrets + proxyParam, err = curve25519.DeriveProxyParam(c.MarshalByteSecret(recipientKey.D), c.MarshalByteSecret(forwardeeKey.D)) + + return proxyParam, err +} + +func ProxyTransform(ephemeral, proxyParam []byte) ([]byte, error) { + c := ecc.NewCurve25519() + + parsedEphemeral := c.UnmarshalBytePoint(ephemeral) + if parsedEphemeral == nil { + return nil, pgperrors.InvalidArgumentError("invalid ephemeral") + } + + if len(proxyParam) != curve25519.ParamSize { + return nil, pgperrors.InvalidArgumentError("invalid proxy parameter") + } + + transformed, err := curve25519.ProxyTransform(parsedEphemeral, proxyParam) + if err != nil { + return nil, err + } + + return c.MarshalBytePoint(transformed), nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go new file mode 100644 index 000000000..f94ae1b2f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go @@ -0,0 +1,80 @@ +// Package ecdsa implements ECDSA signature, suitable for OpenPGP, +// as specified in RFC 6637, section 5. +package ecdsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" + "math/big" +) + +type PublicKey struct { + X, Y *big.Int + curve ecc.ECDSACurve +} + +type PrivateKey struct { + PublicKey + D *big.Int +} + +func NewPublicKey(curve ecc.ECDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalIntegerPoint(pk.X, pk.Y) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p) + if pk.X == nil { + return errors.New("ecdsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalIntegerSecret() []byte { + return sk.curve.MarshalIntegerSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error { + sk.D = sk.curve.UnmarshalIntegerSecret(d) + + if sk.D == nil { + return errors.New("ecdsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand) + return +} + +func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) { + return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash) +} + +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + return pub.curve.Verify(pub.X, pub.Y, hash, r, s) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go 
new file mode 100644 index 000000000..6abdf7c44 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go @@ -0,0 +1,115 @@ +// Package ed25519 implements the ed25519 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed25519 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed25519lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed25519lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed25519lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | pub key point. + Key []byte +} + +// NewPublicKey creates a new empty ed25519 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed25519 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying 32 byte seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed25519lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed25519lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed25519 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + return ed25519lib.Sign(priv.Key, message), nil +} + +// Verify verifies an ed25519 signature. +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + return ed25519lib.Verify(pub.Point, message, signature) +} + +// Validate checks if the ed25519 private key is valid. +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed25519: invalid ed25519 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed25519 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed25519 signature from a reader. 
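A short generate/sign/verify sketch for this wrapper, ahead of the ReadSignature helper below (illustrative only; it uses just the functions defined in this file):

```
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
)

func main() {
	priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	if err := ed25519.Validate(priv); err != nil {
		panic(err)
	}

	msg := []byte("attached payload")
	sig, err := ed25519.Sign(priv, msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(ed25519.Verify(&priv.PublicKey, msg, sig)) // true

	// Signatures are encoded as fixed-size byte strings.
	var buf bytes.Buffer
	if err := ed25519.WriteSignature(&buf, sig); err != nil {
		panic(err)
	}
	round, err := ed25519.ReadSignature(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(round, sig)) // true
}
```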
+func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go new file mode 100644 index 000000000..b11fb4fb1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go @@ -0,0 +1,119 @@ +// Package ed448 implements the ed448 signature algorithm for OpenPGP +// as defined in the Open PGP crypto refresh. +package ed448 + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys in this package. + PublicKeySize = ed448lib.PublicKeySize + // SeedSize is the size, in bytes, of private key seeds. + // The private key representation used by RFC 8032. + SeedSize = ed448lib.SeedSize + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = ed448lib.SignatureSize +) + +type PublicKey struct { + // Point represents the elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Key the private key representation by RFC 8032, + // encoded as seed | public key point. + Key []byte +} + +// NewPublicKey creates a new empty ed448 public key. +func NewPublicKey() *PublicKey { + return &PublicKey{} +} + +// NewPrivateKey creates a new empty private key referencing the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Seed returns the ed448 private key secret seed. +// The private key representation by RFC 8032. +func (pk *PrivateKey) Seed() []byte { + return pk.Key[:SeedSize] +} + +// MarshalByteSecret returns the underlying seed of the private key. +func (pk *PrivateKey) MarshalByteSecret() []byte { + return pk.Seed() +} + +// UnmarshalByteSecret computes the private key from the secret seed +// and stores it in the private key object. +func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error { + sk.Key = ed448lib.NewKeyFromSeed(seed) + return nil +} + +// GenerateKey generates a fresh private key with the provided randomness source. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + publicKey, privateKey, err := ed448lib.GenerateKey(rand) + if err != nil { + return nil, err + } + privateKeyOut := new(PrivateKey) + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Key = privateKey[:] + return privateKeyOut, nil +} + +// Sign signs a message with the ed448 algorithm. +// priv MUST be a valid key! Check this with Validate() before use. +func Sign(priv *PrivateKey, message []byte) ([]byte, error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Sign(priv.Key, message, ""), nil +} + +// Verify verifies a ed448 signature +func Verify(pub *PublicKey, message []byte, signature []byte) bool { + // Ed448 is used with the empty string as a context string. 
+ // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7 + return ed448lib.Verify(pub.Point, message, signature, "") +} + +// Validate checks if the ed448 private key is valid +func Validate(priv *PrivateKey) error { + expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 secret") + } + if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 { + return errors.KeyInvalidError("ed448: invalid ed448 public key") + } + return nil +} + +// ENCODING/DECODING signature: + +// WriteSignature encodes and writes an ed448 signature to writer. +func WriteSignature(writer io.Writer, signature []byte) error { + _, err := writer.Write(signature) + return err +} + +// ReadSignature decodes an ed448 signature from a reader. +func ReadSignature(reader io.Reader) ([]byte, error) { + signature := make([]byte, SignatureSize) + if _, err := io.ReadFull(reader, signature); err != nil { + return nil, err + } + return signature, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go new file mode 100644 index 000000000..99ecfc7f1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go @@ -0,0 +1,91 @@ +// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in +// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 +package eddsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" +) + +type PublicKey struct { + X []byte + curve ecc.EdDSACurve +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.EdDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.EdDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.X) +} + +func (pk *PublicKey) UnmarshalPoint(x []byte) error { + pk.X = pk.curve.UnmarshalBytePoint(x) + + if pk.X == nil { + return errors.New("eddsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("eddsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand) + return +} + +func Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) { + sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message) + if err != nil { + return nil, nil, err + } + + r, s = priv.PublicKey.curve.MarshalSignature(sig) + return +} + +func Verify(pub *PublicKey, message, r, s []byte) bool { + sig := pub.curve.UnmarshalSignature(r, s) + if sig == nil { + return false + } + + return pub.curve.Verify(pub.X, message, sig) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D) +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go similarity index 88% rename from vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go index 743b35a12..bad277434 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -10,13 +10,7 @@ // This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it // unsuitable for other protocols. RSA should be used in preference in any // case. -// -// Deprecated: this package was only provided to support ElGamal encryption in -// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see -// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has -// compatibility and security issues (see https://eprint.iacr.org/2021/923). -// Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" import ( "crypto/rand" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go new file mode 100644 index 000000000..c20292c61 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,145 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +var InvalidForwardeeKeyError = InvalidArgumentError("invalid forwardee key") + +// SignatureError indicates that a syntactically valid signature failed to +// validate. 
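These error kinds are plain string-based types rather than sentinel values, so callers distinguish them with type assertions; a sketch using only the types defined above (illustrative, not from the vendored sources):

```
package main

import (
	"fmt"

	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

// describe branches on the error kind, the way callers of the openpgp
// packages typically do.
func describe(err error) string {
	switch err.(type) {
	case pgperrors.StructuralError:
		return "malformed OpenPGP data"
	case pgperrors.UnsupportedError:
		return "valid but unsupported feature"
	case pgperrors.InvalidArgumentError:
		return "caller error"
	default:
		return "other error"
	}
}

func main() {
	fmt.Println(describe(pgperrors.UnsupportedError("aead mode 42")))
	// valid but unsupported feature
}
```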
+type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") +var ErrMDCMissing error = SignatureError("MDC packet not found") + +type signatureExpiredError int + +func (se signatureExpiredError) Error() string { + return "openpgp: signature expired" +} + +var ErrSignatureExpired error = signatureExpiredError(0) + +type keyExpiredError int + +func (ke keyExpiredError) Error() string { + return "openpgp: key expired" +} + +var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) + +type signatureOlderThanKeyError int + +func (ske signatureOlderThanKeyError) Error() string { + return "openpgp: signature is older than the key" +} + +var ErrKeyExpired error = keyExpiredError(0) + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +// KeyInvalidError indicates that the public key parameters are invalid +// as they do not match the private ones +type KeyInvalidError string + +func (e KeyInvalidError) Error() string { + return "openpgp: invalid key: " + string(e) +} + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type WeakAlgorithmError string + +func (e WeakAlgorithmError) Error() string { + return "openpgp: weak algorithms are rejected: " + string(e) +} + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +type CriticalUnknownPacketTypeError uint8 + +func (upte CriticalUnknownPacketTypeError) Error() string { + return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) +} + +// AEADError indicates that there is a problem when initializing or using a +// AEAD instance, configuration struct, nonces or index values. +type AEADError string + +func (ae AEADError) Error() string { + return "openpgp: aead error: " + string(ae) +} + +// ErrDummyPrivateKey results when operations are attempted on a private key +// that is just a dummy key. See +// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 +type ErrDummyPrivateKey string + +func (dke ErrDummyPrivateKey) Error() string { + return "openpgp: s2k GNU dummy key: " + string(dke) +} + +// ErrMalformedMessage results when the packet sequence is incorrect +type ErrMalformedMessage string + +func (dke ErrMalformedMessage) Error() string { + return "openpgp: malformed message " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go new file mode 100644 index 000000000..ae45c3c2b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go @@ -0,0 +1,163 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package openpgp + +import ( + goerrors "errors" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// NewForwardingEntity generates a new forwardee key and derives the proxy parameters from the entity e. +// If strict, it will return an error if encryption-capable non-revoked subkeys with a wrong algorithm are found, +// instead of ignoring them +func (e *Entity) NewForwardingEntity( + name, comment, email string, config *packet.Config, strict bool, +) ( + forwardeeKey *Entity, instances []packet.ForwardingInstance, err error, +) { + if e.PrimaryKey.Version != 4 { + return nil, nil, errors.InvalidArgumentError("unsupported key version") + } + + now := config.Now() + i := e.PrimaryIdentity() + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked + return nil, nil, errors.InvalidArgumentError("primary key is expired") + } + + // Generate a new Primary key for the forwardee + config.Algorithm = packet.PubKeyAlgoEdDSA + config.Curve = packet.Curve25519 + keyLifetimeSecs := config.KeyLifetime() + + forwardeePrimaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, nil, err + } + + primary := packet.NewSignerPrivateKey(now, forwardeePrimaryPrivRaw) + + forwardeeKey = &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: make(map[string]*Identity), + Subkeys: []Subkey{}, + } + + err = forwardeeKey.addUserId(name, comment, email, config, now, keyLifetimeSecs, true) + if err != nil { + return nil, nil, err + } + + // Init empty instances + instances = []packet.ForwardingInstance{} + + // Handle all forwarder subkeys + for _, forwarderSubKey := range e.Subkeys { + // Filter flags + if !forwarderSubKey.PublicKey.PubKeyAlgo.CanEncrypt() { + continue + } + + // Filter expiration & revokal + if forwarderSubKey.PublicKey.KeyExpired(forwarderSubKey.Sig, now) || + forwarderSubKey.Sig.SigExpired(now) || + forwarderSubKey.Revoked(now) { + continue + } + + if forwarderSubKey.PublicKey.PubKeyAlgo != packet.PubKeyAlgoECDH { + if strict { + return nil, nil, errors.InvalidArgumentError("encryption subkey is not algorithm 18 (ECDH)") + } else { + continue + } + } + + forwarderEcdhKey, ok := forwarderSubKey.PrivateKey.PrivateKey.(*ecdh.PrivateKey) + if !ok { + return nil, nil, errors.InvalidArgumentError("malformed key") + } + + err = forwardeeKey.addEncryptionSubkey(config, now, 0) + if err != nil { + return nil, nil, err + } + + forwardeeSubKey := forwardeeKey.Subkeys[len(forwardeeKey.Subkeys)-1] + + forwardeeEcdhKey, ok := forwardeeSubKey.PrivateKey.PrivateKey.(*ecdh.PrivateKey) + if !ok { + return nil, nil, goerrors.New("wrong forwarding sub key generation") + } + + instance := packet.ForwardingInstance{ + KeyVersion: 4, + ForwarderFingerprint: forwarderSubKey.PublicKey.Fingerprint, + } + + instance.ProxyParameter, err = ecdh.DeriveProxyParam(forwarderEcdhKey, forwardeeEcdhKey) + if err != nil { + return nil, nil, err + } + + kdf := ecdh.KDF{ + Version: ecdh.KDFVersionForwarding, + Hash: forwarderEcdhKey.KDF.Hash, + Cipher: forwarderEcdhKey.KDF.Cipher, + } + + // If deriving a forwarding key from a forwarding key + if forwarderSubKey.Sig.FlagForward { + if forwarderEcdhKey.KDF.Version != ecdh.KDFVersionForwarding { + return nil, nil, goerrors.New("malformed 
forwarder key") + } + kdf.ReplacementFingerprint = forwarderEcdhKey.KDF.ReplacementFingerprint + } else { + kdf.ReplacementFingerprint = forwarderSubKey.PublicKey.Fingerprint + } + + err = forwardeeSubKey.PublicKey.ReplaceKDF(kdf) + if err != nil { + return nil, nil, err + } + + // Extract fingerprint after changing the KDF + instance.ForwardeeFingerprint = forwardeeSubKey.PublicKey.Fingerprint + + // 0x04 - This key may be used to encrypt communications. + forwardeeSubKey.Sig.FlagEncryptCommunications = false + + // 0x08 - This key may be used to encrypt storage. + forwardeeSubKey.Sig.FlagEncryptStorage = false + + // 0x10 - The private component of this key may have been split by a secret-sharing mechanism. + forwardeeSubKey.Sig.FlagSplitKey = true + + // 0x40 - This key may be used for forwarded communications. + forwardeeSubKey.Sig.FlagForward = true + + // Re-sign subkey binding signature + err = forwardeeSubKey.Sig.SignKey(forwardeeSubKey.PublicKey, forwardeeKey.PrivateKey, config) + if err != nil { + return nil, nil, err + } + + // Append each valid instance to the list + instances = append(instances, instance) + } + + if len(instances) == 0 { + return nil, nil, errors.InvalidArgumentError("no valid subkey found") + } + + return forwardeeKey, instances, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go new file mode 100644 index 000000000..526bd7777 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go @@ -0,0 +1,24 @@ +package openpgp + +import ( + "crypto" + + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + return algorithm.HashIdToHash(id) +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + return algorithm.HashIdToString(id) +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + return algorithm.HashToHashId(h) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 000000000..d06706518 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(3) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. 
+func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 000000000..c76a75bcd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,97 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// ID returns the algorithm Id, as a byte, of cipher. +func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192, TripleDES: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. 
+func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 000000000..d1a00fc74 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,143 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto/Hash supported hash functions. +var ( + SHA1 Hash = cryptoHash{2, crypto.SHA1} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} + SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256} + SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512} +) + +// HashById represents the different hash functions specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + SHA3_256.Id(): SHA3_256, + SHA3_512.Id(): SHA3_512, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash. +func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", + SHA3_256.Id(): "SHA3-256", + SHA3_512.Id(): "SHA3-512", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + return 0, false +} + +// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP +// hash id, allowing sha1. 
+func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + + if id == SHA1.Id() { + return SHA1.HashFunc(), true + } + + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.String(), true + } + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + return 0, false +} + +// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash, +// allowing instances of SHA1 +func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + if h == SHA1.HashFunc() { + return SHA1.Id(), true + } + + return 0, false +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go new file mode 100644 index 000000000..a6721ff98 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go @@ -0,0 +1,170 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" + "io" +) + +type curve25519 struct{} + +func NewCurve25519() *curve25519 { + return &curve25519{} +} + +func (c *curve25519) GetCurveName() string { + return "curve25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes the public point to native format, removing the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x25519lib.Size+1 { + return nil + } + + // Remove prefix + return point[1:] +} + +// MarshalByteSecret encodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +// Note that leading zero bytes are stripped later when encoding as an MPI. +func (c *curve25519) MarshalByteSecret(secret []byte) []byte { + d := make([]byte, x25519lib.Size) + copyReversed(d, secret) + + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // the curve. + // + // This masking is done internally in the underlying lib and so is unnecessary + // for security, but OpenPGP implementations require that private keys be + // pre-masked. + d[0] &= 127 + d[0] |= 64 + d[31] &= 248 + + return d +} + +// UnmarshalByteSecret decodes the secret scalar from native format. 
+// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +func (c *curve25519) UnmarshalByteSecret(d []byte) []byte { + if len(d) > x25519lib.Size { + return nil + } + + // Ensure truncated leading bytes are re-added + secret := make([]byte, x25519lib.Size) + copyReversed(secret, d) + + return secret +} + +// generateKeyPairBytes Generates a private-public key-pair. +// 'priv' is a private key; a little-endian scalar belonging to the set +// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the +// curve. 'pub' is simply 'priv' * G where G is the base point. +// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. +func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) { + _, err = io.ReadFull(rand, priv[:]) + if err != nil { + return + } + + x25519lib.KeyGen(&pub, &priv) + return +} + +func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *genericCurve) MaskSecret(secret []byte) []byte { + return secret +} + +func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}" + // ephemeralPrivate corresponds to `v`. + // ephemeralPublic corresponds to `V`. + ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + + // RFC6637 §8: "Obtain the authenticated recipient public key R" + // pubKey corresponds to `R`. + var pubKey x25519lib.Key + copy(pubKey[:], point) + + // RFC6637 §8: "Compute the shared point S = vR" + // "VB = convert point V to the octet string" + // sharedPoint corresponds to `VB`. + var sharedPoint x25519lib.Key + x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey) + + return ephemeralPublic[:], sharedPoint[:], nil +} + +func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) { + var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key + // RFC6637 §8: "The decryption is the inverse of the method given." + // All quoted descriptions in comments below describe encryption, and + // the reverse is performed. + // vsG corresponds to `VB` in RFC6637 §8 . + + // RFC6637 §8: "VB = convert point V to the octet string" + copy(ephemeralPublic[:], vsG) + + // decodedPrivate corresponds to `r` in RFC6637 §8 . + copy(decodedPrivate[:], secret) + + // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating + // S = rV = rvG, where (r,R) is the recipient's key pair." + // sharedPoint corresponds to `S`. 
+ x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic) + + return sharedPoint[:], nil +} + +func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) { + var pk, sk x25519lib.Key + copy(sk[:], secret) + x25519lib.KeyGen(&pk, &sk) + + if subtle.ConstantTimeCompare(point, pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go new file mode 100644 index 000000000..21670a82c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go @@ -0,0 +1,122 @@ +// Package curve25519 implements custom field operations without clamping for forwarding. +package curve25519 + +import ( + "crypto/subtle" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field" + x25519lib "github.com/cloudflare/circl/dh/x25519" + "math/big" +) + +var curveGroupByte = [x25519lib.Size]byte{ + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x14, 0xde, 0xf9, 0xde, 0xa2, 0xf7, 0x9c, 0xd6, 0x58, 0x12, 0x63, 0x1a, 0x5c, 0xf5, 0xd3, 0xed, +} + +const ParamSize = x25519lib.Size + +func DeriveProxyParam(recipientSecretByte, forwardeeSecretByte []byte) (proxyParam []byte, err error) { + curveGroup := new(big.Int).SetBytes(curveGroupByte[:]) + recipientSecret := new(big.Int).SetBytes(recipientSecretByte) + forwardeeSecret := new(big.Int).SetBytes(forwardeeSecretByte) + + proxyTransform := new(big.Int).Mod( + new(big.Int).Mul( + new(big.Int).ModInverse(forwardeeSecret, curveGroup), + recipientSecret, + ), + curveGroup, + ) + + rawProxyParam := proxyTransform.Bytes() + + // pad and convert to small endian + proxyParam = make([]byte, x25519lib.Size) + l := len(rawProxyParam) + for i := 0; i < l; i++ { + proxyParam[i] = rawProxyParam[l-i-1] + } + + return proxyParam, nil +} + +func ProxyTransform(ephemeral, proxyParam []byte) ([]byte, error) { + var transformed, safetyCheck [x25519lib.Size]byte + + var scalarEight = make([]byte, x25519lib.Size) + scalarEight[0] = 0x08 + err := ScalarMult(&safetyCheck, scalarEight, ephemeral) + if err != nil { + return nil, err + } + + err = ScalarMult(&transformed, proxyParam, ephemeral) + if err != nil { + return nil, err + } + + return transformed[:], nil +} + +func ScalarMult(dst *[32]byte, scalar, point []byte) error { + var in, base, zero [32]byte + copy(in[:], scalar) + copy(base[:], point) + + scalarMult(dst, &in, &base) + if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { + return errors.InvalidArgumentError("invalid ephemeral: low order point") + } + + return nil +} + +func scalarMult(dst, scalar, point *[32]byte) { + var e [32]byte + + copy(e[:], scalar[:]) + + var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element + x1.SetBytes(point[:]) + x2.One() + x3.Set(&x1) + z3.One() + + swap := 0 + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int(b) + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + swap = int(b) + + tmp0.Subtract(&x3, &z3) + tmp1.Subtract(&x2, &z2) + x2.Add(&x2, &z2) + z2.Add(&x3, &z3) + z3.Multiply(&tmp0, &x2) + z2.Multiply(&z2, &tmp1) + tmp0.Square(&tmp1) + tmp1.Square(&x2) + x3.Add(&z3, &z2) + z2.Subtract(&z3, &z2) + 
x2.Multiply(&tmp1, &tmp0) + tmp1.Subtract(&tmp1, &tmp0) + z2.Square(&z2) + + z3.Mult32(&tmp1, 121666) + x3.Square(&x3) + tmp0.Add(&tmp0, &z3) + z3.Multiply(&x1, &z2) + z2.Multiply(&tmp1, &tmp0) + } + + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + + z2.Invert(&z2) + x2.Multiply(&x2, &z2) + copy(dst[:], x2.Bytes()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go new file mode 100644 index 000000000..ca841ad99 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go @@ -0,0 +1,416 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. TODO + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. 
+func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. +func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, which must be a 32-byte little-endian encoding. +// +// Consistent with RFC 7748, the most significant bit (the high bit of the +// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) +// are accepted. Note that this is laxer than specified by RFC 8032. +func (v *Element) SetBytes(x []byte) *Element { + if len(x) != 32 { + panic("edwards25519: invalid field element input size") + } + + // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). + v.l0 = binary.LittleEndian.Uint64(x[0:8]) + v.l0 &= maskLow51Bits + // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). + v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 + v.l1 &= maskLow51Bits + // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). + v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 + v.l2 &= maskLow51Bits + // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). + v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 + v.l3 &= maskLow51Bits + // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). + // Note: not bytes 25:33, shift 4, to avoid overread. 
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 + v.l4 &= maskLow51Bits + + return v +} + +// Bytes returns the canonical 32-byte little-endian encoding of v. +func (v *Element) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var out [32]byte + return v.bytes(&out) +} + +func (v *Element) bytes(out *[32]byte) []byte { + t := *v + t.reduce() + + var buf [8]byte + for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { + bitsOffset := i * 51 + binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8)) + for i, bb := range buf { + off := bitsOffset/8 + i + if off >= len(out) { + break + } + out[off] |= bb + } + } + + return out[:] +} + +// Equal returns 1 if v and u are equal, and 0 otherwise. +func (v *Element) Equal(u *Element) int { + sa, sv := u.Bytes(), v.Bytes() + return subtle.ConstantTimeCompare(sa, sv) +} + +// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. +func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } + +// Select sets v to a if cond == 1, and to b if cond == 0. +func (v *Element) Select(a, b *Element, cond int) *Element { + m := mask64Bits(cond) + v.l0 = (m & a.l0) | (^m & b.l0) + v.l1 = (m & a.l1) | (^m & b.l1) + v.l2 = (m & a.l2) | (^m & b.l2) + v.l3 = (m & a.l3) | (^m & b.l3) + v.l4 = (m & a.l4) | (^m & b.l4) + return v +} + +// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. +func (v *Element) Swap(u *Element, cond int) { + m := mask64Bits(cond) + t := m & (v.l0 ^ u.l0) + v.l0 ^= t + u.l0 ^= t + t = m & (v.l1 ^ u.l1) + v.l1 ^= t + u.l1 ^= t + t = m & (v.l2 ^ u.l2) + v.l2 ^= t + u.l2 ^= t + t = m & (v.l3 ^ u.l3) + v.l3 ^= t + u.l3 ^= t + t = m & (v.l4 ^ u.l4) + v.l4 ^= t + u.l4 ^= t +} + +// IsNegative returns 1 if v is negative, and 0 otherwise. +func (v *Element) IsNegative() int { + return int(v.Bytes()[0] & 1) +} + +// Absolute sets v to |u|, and returns v. +func (v *Element) Absolute(u *Element) *Element { + return v.Select(new(Element).Negate(u), u, u.IsNegative()) +} + +// Multiply sets v = x * y, and returns v. +func (v *Element) Multiply(x, y *Element) *Element { + feMul(v, x, y) + return v +} + +// Square sets v = x * x, and returns v. +func (v *Element) Square(x *Element) *Element { + feSquare(v, x) + return v +} + +// Mult32 sets v = x * y, and returns v. +func (v *Element) Mult32(x *Element, y uint32) *Element { + x0lo, x0hi := mul51(x.l0, y) + x1lo, x1hi := mul51(x.l1, y) + x2lo, x2hi := mul51(x.l2, y) + x3lo, x3hi := mul51(x.l3, y) + x4lo, x4hi := mul51(x.l4, y) + v.l0 = x0lo + 19*x4hi // carried over per the reduction identity + v.l1 = x1lo + x0hi + v.l2 = x2lo + x1hi + v.l3 = x3lo + x2hi + v.l4 = x4lo + x3hi + // The hi portions are going to be only 32 bits, plus any previous excess, + // so we can skip the carry propagation. + return v +} + +// mul51 returns lo + hi * 2⁵¹ = a * b. +func mul51(a uint64, b uint32) (lo uint64, hi uint64) { + mh, ml := bits.Mul64(a, uint64(b)) + lo = ml & maskLow51Bits + hi = (mh << 13) | (ml >> 51) + return +} + +// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element { + var t0, t1, t2 Element + + t0.Square(x) // x^2 + t1.Square(&t0) // x^4 + t1.Square(&t1) // x^8 + t1.Multiply(x, &t1) // x^9 + t0.Multiply(&t0, &t1) // x^11 + t0.Square(&t0) // x^22 + t0.Multiply(&t1, &t0) // x^31 + t1.Square(&t0) // x^62 + for i := 1; i < 5; i++ { // x^992 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 + t1.Square(&t0) // 2^11 - 2 + for i := 1; i < 10; i++ { // 2^20 - 2^10 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^20 - 1 + t2.Square(&t1) // 2^21 - 2 + for i := 1; i < 20; i++ { // 2^40 - 2^20 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^40 - 1 + t1.Square(&t1) // 2^41 - 2 + for i := 1; i < 10; i++ { // 2^50 - 2^10 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^50 - 1 + t1.Square(&t0) // 2^51 - 2 + for i := 1; i < 50; i++ { // 2^100 - 2^50 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^100 - 1 + t2.Square(&t1) // 2^101 - 2 + for i := 1; i < 100; i++ { // 2^200 - 2^100 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^200 - 1 + t1.Square(&t1) // 2^201 - 2 + for i := 1; i < 50; i++ { // 2^250 - 2^50 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^250 - 1 + t0.Square(&t0) // 2^251 - 2 + t0.Square(&t0) // 2^252 - 4 + return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) +} + +// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. +var sqrtM1 = &Element{1718705420411056, 234908883556509, + 2233514472574048, 2117202627021982, 765476049583133} + +// SqrtRatio sets r to the non-negative square root of the ratio of u and v. +// +// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio +// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, +// and returns r and 0. +func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { + var a, b Element + + // r = (u * v3) * (u * v7)^((p-5)/8) + v2 := a.Square(v) + uv3 := b.Multiply(u, b.Multiply(v2, v)) + uv7 := a.Multiply(uv3, a.Square(v2)) + r.Multiply(uv3, r.Pow22523(uv7)) + + check := a.Multiply(v, a.Square(r)) // check = v * r^2 + + uNeg := b.Negate(u) + correctSignSqrt := check.Equal(u) + flippedSignSqrt := check.Equal(uNeg) + flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) + + rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r + // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) + r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) + + r.Absolute(r) // Choose the nonnegative square root. + return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go new file mode 100644 index 000000000..44dc8e8ca --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. 
+//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s new file mode 100644 index 000000000..293f013c9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s @@ -0,0 +1,379 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, 
DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go new file mode 100644 index 000000000..ddb6c9b8f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go new file mode 100644 index 000000000..af459ef51 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s new file mode 100644 index 000000000..5c91e4589 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s @@ -0,0 +1,43 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go new file mode 100644 index 000000000..234a5b2e5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go new file mode 100644 index 000000000..7b5b78cbd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go @@ -0,0 +1,264 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. +func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, + // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. 
+ + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2⁵¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. + // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) + // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² + // r0 < 2⁷ × 2⁵² × 2⁵² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2⁵² × 2⁵² + // r4 < 2¹⁰⁷ + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as a Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2⁵¹. + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. 
+ // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. + + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagate brings the limbs below 52 bits by applying the reduction +// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go new file mode 100644 index 000000000..0da2d0d85 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -0,0 +1,143 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
+package ecc + +import ( + "bytes" + "crypto/elliptic" + + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const Curve25519GenName = "Curve25519" + +type CurveInfo struct { + GenName string + Oid *encoding.OID + Curve Curve +} + +var Curves = []CurveInfo{ + { + // NIST P-256 + GenName: "P256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: NewGenericCurve(elliptic.P256()), + }, + { + // NIST P-384 + GenName: "P384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: NewGenericCurve(elliptic.P384()), + }, + { + // NIST P-521 + GenName: "P521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: NewGenericCurve(elliptic.P521()), + }, + { + // SecP256k1 + GenName: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: NewGenericCurve(bitcurves.S256()), + }, + { + // Curve25519 + GenName: Curve25519GenName, + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: NewCurve25519(), + }, + { + // x448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), + Curve: NewX448(), + }, + { + // Ed25519 + GenName: Curve25519GenName, + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: NewEd25519(), + }, + { + // Ed448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}), + Curve: NewEd448(), + }, + { + // BrainpoolP256r1 + GenName: "BrainpoolP256", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: NewGenericCurve(brainpool.P256r1()), + }, + { + // BrainpoolP384r1 + GenName: "BrainpoolP384", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: NewGenericCurve(brainpool.P384r1()), + }, + { + // BrainpoolP512r1 + GenName: "BrainpoolP512", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: NewGenericCurve(brainpool.P512r1()), + }, +} + +func FindByCurve(curve Curve) *CurveInfo { + for _, curveInfo := range Curves { + if curveInfo.Curve.GetCurveName() == curve.GetCurveName() { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range Curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindEdDSAByGenName(curveGenName string) EdDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(EdDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDSAByGenName(curveGenName string) ECDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDHByGenName(curveGenName string) ECDHCurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDHCurve) + if ok { + return curve + } + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go new file mode 100644 index 000000000..5ed9c93b3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go @@ -0,0 +1,48 @@ +// Package ecc 
implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "io" + "math/big" +) + +type Curve interface { + GetCurveName() string +} + +type ECDSACurve interface { + Curve + MarshalIntegerPoint(x, y *big.Int) []byte + UnmarshalIntegerPoint([]byte) (x, y *big.Int) + MarshalIntegerSecret(d *big.Int) []byte + UnmarshalIntegerSecret(d []byte) *big.Int + GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) + Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) + Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool + ValidateECDSA(x, y *big.Int, secret []byte) error +} + +type EdDSACurve interface { + Curve + MarshalBytePoint(x []byte) []byte + UnmarshalBytePoint([]byte) (x []byte) + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + MarshalSignature(sig []byte) (r, s []byte) + UnmarshalSignature(r, s []byte) (sig []byte) + GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) + Sign(publicKey, privateKey, message []byte) (sig []byte, err error) + Verify(publicKey, message, sig []byte) bool + ValidateEdDSA(publicKey, privateKey []byte) (err error) +} +type ECDHCurve interface { + Curve + MarshalBytePoint([]byte) (encoded []byte) + UnmarshalBytePoint(encoded []byte) []byte + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) + Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) + Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) + ValidateECDH(public []byte, secret []byte) error +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go new file mode 100644 index 000000000..5a4c3a859 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -0,0 +1,120 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "bytes" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ed25519Size = 32 + +type ed25519 struct{} + +func NewEd25519() *ed25519 { + return &ed25519{} +} + +func (c *ed25519) GetCurveName() string { + return "ed25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalBytePoint(x []byte) []byte { + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed25519lib.PublicKeySize+1 { + return nil + } + + // Return unprefixed + return point[1:] +} + +// MarshalByteSecret encodes a scalar in native format. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalByteSecret(d []byte) []byte { + return d +} + +// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) { + if len(s) > ed25519lib.SeedSize { + return nil + } + + // Handle stripped leading zeroes + d = make([]byte, ed25519lib.SeedSize) + copy(d[ed25519lib.SeedSize-len(s):], s) + return +} + +// MarshalSignature splits a signature in R and S. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) { + return sig[:ed25519Size], sig[ed25519Size:] +} + +// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) { + // Check size + if len(r) > 32 || len(s) > 32 { + return nil + } + + sig = make([]byte, ed25519lib.SignatureSize) + + // Handle stripped leading zeroes + copy(sig[ed25519Size-len(r):ed25519Size], r) + copy(sig[ed25519lib.SignatureSize-len(s):], s) + return sig +} + +func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed25519lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed25519lib.SeedSize], nil +} + +func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { + privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) + + if privateKeyCap >= privateKeyLen+publicKeyLen && + bytes.Equal(privateKey[privateKeyLen:privateKeyLen+publicKeyLen], publicKey) { + return privateKey[:privateKeyLen+publicKeyLen] + } + + return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) +} + +func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message) + return sig, nil +} + +func (c *ed25519) Verify(publicKey, message, sig []byte) bool { + return ed25519lib.Verify(publicKey, message, sig) +} + +func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd25519Sk(publicKey, privateKey) + expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed25519 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go new file mode 100644 index 000000000..b6edda748 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -0,0 +1,119 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "bytes" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +type ed448 struct{} + +func NewEd448() *ed448 { + return &ed448{} +} + +func (c *ed448) GetCurveName() string { + return "ed448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalBytePoint(x []byte) []byte { + // Return prefixed + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed448lib.PublicKeySize+1 { + return nil + } + + // Strip prefix + return point[1:] +} + +// MarshalByteSecret encoded a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalByteSecret(d []byte) []byte { + // Return prefixed + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) { + // Check prefixed size + if len(s) != ed448lib.SeedSize+1 { + return nil + } + + // Strip prefix + return s[1:] +} + +// MarshalSignature splits a signature in R and S, where R is in prefixed native format and +// S is an MPI with value zero. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) { + return append([]byte{0x40}, sig...), []byte{} +} + +// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) { + if len(r) != ed448lib.SignatureSize+1 { + return nil + } + + return r[1:] +} + +func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed448lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed448lib.SeedSize], nil +} + +func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { + privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) + + if privateKeyCap >= privateKeyLen+publicKeyLen && + bytes.Equal(privateKey[privateKeyLen:privateKeyLen+publicKeyLen], publicKey) { + return privateKey[:privateKeyLen+publicKeyLen] + } + + return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) +} + +func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "") + + return sig, nil +} + +func (c *ed448) Verify(publicKey, message, sig []byte) bool { + // Ed448 is used with the empty string as a context string. 
+ // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + return ed448lib.Verify(publicKey, message, sig, "") +} + +func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd448Sk(publicKey, privateKey) + expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed448 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go new file mode 100644 index 000000000..e28d7c710 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go @@ -0,0 +1,149 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" + "math/big" +) + +type genericCurve struct { + Curve elliptic.Curve +} + +func NewGenericCurve(c elliptic.Curve) *genericCurve { + return &genericCurve{ + Curve: c, + } +} + +func (c *genericCurve) GetCurveName() string { + return c.Curve.Params().Name +} + +func (c *genericCurve) MarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte { + return elliptic.Marshal(c.Curve, x, y) +} + +func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) { + return elliptic.Unmarshal(c.Curve, point) +} + +func (c *genericCurve) MarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte { + return d.Bytes() +} + +func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int { + return new(big.Int).SetBytes(d) +} + +func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) { + secret, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + point = elliptic.Marshal(c.Curve, x, y) + return point, secret, nil +} + +func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) { + priv, err := ecdsa.GenerateKey(c.Curve, rand) + if err != nil { + return + } + + return priv.X, priv.Y, priv.D, nil +} + +func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + panic("invalid point") + } + + d, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + vsG := elliptic.Marshal(c.Curve, x, y) + zbBig, _ := c.Curve.ScalarMult(xP, yP, d) + + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return vsG, zb, nil +} + +func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + x, y := elliptic.Unmarshal(c.Curve, ephemeral) + zbBig, _ := c.Curve.ScalarMult(x, y, secret) + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return zb, nil +} + +func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) { + priv := 
&ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}} + return ecdsa.Sign(rand, priv, hash) +} + +func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool { + pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve} + return ecdsa.Verify(pub, hash, r, s) +} + +func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name)) + } + + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := c.Curve.ScalarBaseMult(secret) + if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) + } + + return nil +} + +func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error { + return c.validate(xP, yP, secret) +} + +func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) + } + + return c.validate(xP, yP, secret) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go new file mode 100644 index 000000000..df04262e9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go @@ -0,0 +1,107 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" +) + +type x448 struct{} + +func NewX448() *x448 { + return &x448{} +} + +func (c *x448) GetCurveName() string { + return "x448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x448lib.Size+1 { + return nil + } + + return point[1:] +} + +// MarshalByteSecret encoded a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) MarshalByteSecret(d []byte) []byte { + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native. 
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) UnmarshalByteSecret(d []byte) []byte { + if len(d) != x448lib.Size+1 { + return nil + } + + // Store without prefix + return d[1:] +} + +func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) { + if _, err = rand.Read(sk[:]); err != nil { + return + } + + x448lib.KeyGen(&pk, &sk) + return +} + +func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + var pk, ss x448lib.Key + seed, e, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + copy(pk[:], point) + x448lib.Shared(&ss, &seed, &pk) + + return e[:], ss[:], nil +} + +func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + var ss, sk, e x448lib.Key + + copy(sk[:], secret) + copy(e[:], ephemeral) + x448lib.Shared(&ss, &sk, &e) + + return ss[:], nil +} + +func (c *x448) ValidateECDH(point []byte, secret []byte) error { + var sk, pk, expectedPk x448lib.Key + + copy(pk[:], point) + copy(sk[:], secret) + x448lib.KeyGen(&expectedPk, &sk) + + if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go new file mode 100644 index 000000000..6c921481b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding implements openpgp packet field encodings as specified in +// RFC 4880 and 6637. +package encoding + +import "io" + +// Field is an encoded field of an openpgp packet. +type Field interface { + // Bytes returns the decoded data. + Bytes() []byte + + // BitLength is the size in bits of the decoded data. + BitLength() uint16 + + // EncodedBytes returns the encoded data. + EncodedBytes() []byte + + // EncodedLength is the size in bytes of the encoded data. + EncodedLength() uint16 + + // ReadFrom reads the next Field from r. + ReadFrom(r io.Reader) (int64, error) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go new file mode 100644 index 000000000..02e5e695c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + "math/big" + "math/bits" +) + +// An MPI is used to store the contents of a big integer, along with the bit +// length that was specified in the original input. This allows the MPI to be +// reserialized exactly. +type MPI struct { + bytes []byte + bitLength uint16 +} + +// NewMPI returns a MPI initialized with bytes. 
+func NewMPI(bytes []byte) *MPI { + for len(bytes) != 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + bitLength := uint16(0) + return &MPI{bytes, bitLength} + } + bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) + return &MPI{bytes, bitLength} +} + +// Bytes returns the decoded data. +func (m *MPI) Bytes() []byte { + return m.bytes +} + +// BitLength is the size in bits of the decoded data. +func (m *MPI) BitLength() uint16 { + return m.bitLength +} + +// EncodedBytes returns the encoded data. +func (m *MPI) EncodedBytes() []byte { + return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data. +func (m *MPI) EncodedLength() uint16 { + return uint16(2 + len(m.bytes)) +} + +// ReadFrom reads into m the next MPI from r. +func (m *MPI) ReadFrom(r io.Reader) (int64, error) { + var buf [2]byte + n, err := io.ReadFull(r, buf[0:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + m.bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + m.bytes = make([]byte, (int(m.bitLength)+7)/8) + + nn, err := io.ReadFull(r, m.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + // remove leading zero bytes from malformed GnuPG encoded MPIs: + // https://bugs.gnupg.org/gnupg/issue1853 + // for _, b := range m.bytes { + // if b != 0 { + // break + // } + // m.bytes = m.bytes[1:] + // m.bitLength -= 8 + // } + + return int64(n) + int64(nn), err +} + +// SetBig initializes m with the bits from n. +func (m *MPI) SetBig(n *big.Int) *MPI { + m.bytes = n.Bytes() + m.bitLength = uint16(n.BitLen()) + return m +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go new file mode 100644 index 000000000..c9df9fe23 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OID is used to store a variable-length field with a one-octet size +// prefix. See https://tools.ietf.org/html/rfc6637#section-9. +type OID struct { + bytes []byte +} + +const ( + // maxOID is the maximum number of bytes in a OID. + maxOID = 254 + // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC + // specifies are reserved. + reservedOIDLength1 = 0 + reservedOIDLength2 = 0xff +) + +// NewOID returns a OID initialized with bytes. +func NewOID(bytes []byte) *OID { + switch len(bytes) { + case reservedOIDLength1, reservedOIDLength2: + panic("encoding: NewOID argument length is reserved") + default: + if len(bytes) > maxOID { + panic("encoding: NewOID argument too large") + } + } + + return &OID{ + bytes: bytes, + } +} + +// Bytes returns the decoded data. +func (o *OID) Bytes() []byte { + return o.bytes +} + +// BitLength is the size in bits of the decoded data. +func (o *OID) BitLength() uint16 { + return uint16(len(o.bytes) * 8) +} + +// EncodedBytes returns the encoded data. +func (o *OID) EncodedBytes() []byte { + return append([]byte{byte(len(o.bytes))}, o.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data. 
+func (o *OID) EncodedLength() uint16 { + return uint16(1 + len(o.bytes)) +} + +// ReadFrom reads into b the next OID from r. +func (o *OID) ReadFrom(r io.Reader) (int64, error) { + var buf [1]byte + n, err := io.ReadFull(r, buf[:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + switch buf[0] { + case reservedOIDLength1, reservedOIDLength2: + return int64(n), errors.UnsupportedError("reserved for future extensions") + } + + o.bytes = make([]byte, buf[0]) + + nn, err := io.ReadFull(r, o.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return int64(n) + int64(nn), err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go new file mode 100644 index 000000000..0c3b91233 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go @@ -0,0 +1,50 @@ +package encoding + +import ( + "io" +) + +type ShortByteString struct { + length uint8 + data []byte +} + +func NewShortByteString(data []byte) *ShortByteString { + byteLength := uint8(len(data)) + + return &ShortByteString{byteLength, data} +} + +func (byteString *ShortByteString) Bytes() []byte { + return byteString.data +} + +func (byteString *ShortByteString) BitLength() uint16 { + return uint16(byteString.length) * 8 +} + +func (byteString *ShortByteString) EncodedBytes() []byte { + encodedLength := [1]byte{ + uint8(byteString.length), + } + return append(encodedLength[:], byteString.data...) +} + +func (byteString *ShortByteString) EncodedLength() uint16 { + return uint16(byteString.length) + 1 +} + +func (byteString *ShortByteString) ReadFrom(r io.Reader) (int64, error) { + var lengthBytes [1]byte + if n, err := io.ReadFull(r, lengthBytes[:]); err != nil { + return int64(n), err + } + + byteString.length = uint8(lengthBytes[0]) + + byteString.data = make([]byte, byteString.length) + if n, err := io.ReadFull(r, byteString.data); err != nil { + return int64(n + 1), err + } + return int64(byteString.length + 1), nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go new file mode 100644 index 000000000..f56874958 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -0,0 +1,458 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package openpgp + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + goerrors "errors" + "io" + "math/big" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/symmetric" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" +) + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + // Generate a primary signing key + primaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, err + } + primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) + if config.V6() { + if err := primary.UpgradeToV6(); err != nil { + return nil, err + } + } + + e := &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: make(map[string]*Identity), + Subkeys: []Subkey{}, + Signatures: []*packet.Signature{}, + } + + if config.V6() { + // In v6 keys algorithm preferences should be stored in direct key signatures + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config) + err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return nil, err + } + err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + e.Signatures = append(e.Signatures, selfSignature) + e.SelfSignature = selfSignature + } + + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) + if err != nil { + return nil, err + } + + // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired. + err = e.addEncryptionSubkey(config, creationTime, 0) + if err != nil { + return nil, err + } + + return e, nil +} + +func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) +} + +func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error { + selfSignature.CreationTime = creationTime + selfSignature.KeyLifetimeSecs = &keyLifetimeSecs + selfSignature.FlagsValid = true + selfSignature.FlagSign = true + selfSignature.FlagCertify = true + selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 + selfSignature.SEIPDv2 = config.AEAD() != nil + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. 
+ hash, ok := algorithm.HashToHashId(config.Hash()) + if !ok { + return errors.UnsupportedError("unsupported preferred hash function") + } + + selfSignature.PreferredHash = []uint8{hash} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. + selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + + // And for DefaultMode. + modes := []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeOCB { + modes = append(modes, uint8(packet.AEADModeOCB)) + } + + // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) + for _, cipher := range selfSignature.PreferredSymmetric { + for _, mode := range modes { + selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + } + } + return nil +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id exist") + } + + primary := t.PrivateKey + isPrimaryId := len(t.Identities) == 0 + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + if writeProperties { + err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config) + if err != nil { + return err + } + } + selfSignature.IsPrimaryId = &isPrimaryId + + // User ID binding signature + err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + if err != nil { + return err + } + t.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, + } + return nil +} + +// AddSigningSubkey adds a signing keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. 
+func (e *Entity) AddSigningSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newSigner(config) + if err != nil { + return err + } + sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config.V6() { + if err := sub.UpgradeToV6(); err != nil { + return err + } + } + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagSign = true + subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config) + subkey.Sig.EmbeddedSignature.CreationTime = creationTime + + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. +func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs) +} + +func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config.V6() { + if err := sub.UpgradeToV6(); err != nil { + return err + } + } + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagEncryptStorage = true + subkey.Sig.FlagEncryptCommunications = true + + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + if config.V6() { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. 
+ return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") + } + curve := ecc.FindEdDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := eddsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoECDSA: + curve := ecc.FindECDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := ecdsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd25519: + priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoEd448: + priv, err := ed448.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return priv, nil + case packet.ExperimentalPubKeyAlgoHMAC: + hash := algorithm.HashById[hashToHashId(config.Hash())] + return symmetric.HMACGenerateKey(config.Random(), hash) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: + fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + if config.V6() && + (config.CurveName() == packet.Curve25519 || + config.CurveName() == packet.Curve448) { + // Implementations MUST NOT accept or generate v6 key material + // using the deprecated OIDs. + return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") + } + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + curve := ecc.FindECDHByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + return ecdh.GenerateKey(config.Random(), curve, kdf) + case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey + return x25519.GenerateKey(config.Random()) + case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey + return x448.GenerateKey(config.Random()) + case packet.ExperimentalPubKeyAlgoAEAD: + cipher := algorithm.CipherFunction(config.Cipher()) + return symmetric.AEADGenerateKey(config.Random(), cipher) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and pre-populated primes. 
+func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + priv.D = new(big.Int) + e := big.NewInt(int64(priv.E)) + ok := priv.D.ModInverse(e, totient) + + if ok != nil { + priv.Primes = primes + priv.N = n + break + } + } + + priv.Precompute() + return priv, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go new file mode 100644 index 000000000..bbcc95d9b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -0,0 +1,915 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + goerrors "errors" + "fmt" + "io" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. 
+type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey + SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6) + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature + Revocations []*packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. +func (e *Entity) PrimaryIdentity() *Identity { + var primaryIdentity *Identity + for _, ident := range e.Identities { + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident + } + } + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if existingId == nil { + return true + } + + if len(existingId.Revocations) > len(potentialNewId.Revocations) { + return true + } + + if len(existingId.Revocations) < len(potentialNewId.Revocations) { + return false + } + + if existingId.SelfSignature == nil { + return true + } + + if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return false + } + + if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) +} + +// EncryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { + // Fail to find any encryption key if the... 
+ primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired + e.Revoked(now) || // primary key has been revoked + primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) + return Key{}, false + } + + // Iterate the keys to find the newest, unexpired one + candidateSubkey := -1 + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true + } + + // If we don't have any subkeys for encryption and the primary key + // is marked as OK to encrypt with, then we can use it. + if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true + } + + return Key{}, false +} + +// CertificationKey return the best candidate Key for certifying a key with this +// Entity. +func (e *Entity) CertificationKey(now time.Time) (Key, bool) { + return e.CertificationKeyById(now, 0) +} + +// CertificationKeyById return the Key for key certification with this +// Entity and keyID. +func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) { + return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify) +} + +// SigningKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) SigningKey(now time.Time) (Key, bool) { + return e.SigningKeyById(now, 0) +} + +// SigningKeyById return the Key for signing a message with this +// Entity and keyID. +func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { + return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign) +} + +func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { + // Fail to find any signing key if the... 
+ primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() + if primarySelfSignature == nil || // no self-signature found + e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired + e.Revoked(now) || // primary key has been revoked + primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) + return Key{}, false + } + + // Iterate the keys to find the newest, unexpired one + candidateSubkey := -1 + var maxTime time.Time + for idx, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + (flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && + (id == 0 || subkey.PublicKey.KeyId == id) { + candidateSubkey = idx + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true + } + + // If we don't have any subkeys for signing and the primary key + // is marked as OK to sign with, then we can use it. + if primarySelfSignature.FlagsValid && + (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) && + (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) && + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true + } + + // No keys with a valid Signing Flag or no keys matched the id passed in + return Key{}, false +} + +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key +// derived from the provided passphrase. Public keys and dummy keys are ignored, +// and don't cause an error to be returned. +func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error { + var keysToEncrypt []*packet.PrivateKey + // Add entity private key to encrypt. + if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted { + keysToEncrypt = append(keysToEncrypt, e.PrivateKey) + } + + // Add subkeys to encrypt. + for _, sub := range e.Subkeys { + if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted { + keysToEncrypt = append(keysToEncrypt, sub.PrivateKey) + } + } + return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config) +} + +// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase. +// Avoids recomputation of similar s2k key derivations. 
Public keys and dummy keys are ignored, +// and don't cause an error to be returned. +func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { + var keysToDecrypt []*packet.PrivateKey + // Add entity private key to decrypt. + if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted { + keysToDecrypt = append(keysToDecrypt, e.PrivateKey) + } + + // Add subkeys to decrypt. + for _, sub := range e.Subkeys { + if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted { + keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) + } + } + return packet.DecryptPrivateKeys(keysToDecrypt, passphrase) +} + +// Revoked returns whether the identity has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. +// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. +func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + selfSig, _ := e.PrimarySelfSignature() + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// KeysByIdAndUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if requiredUsage != 0 { + if key.SelfSignature == nil || !key.SelfSignature.FlagsValid { + continue + } + + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. 
+func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications || subKey.Sig.FlagForward) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. 
+func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature + var directSignatures []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + directSignatures = append(directSignatures, pkt) + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if !pkt.IsSubkey { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets. + } + } + + if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 { + return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version)) + } + + // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key. + if e.PrimaryKey.Version == 6 { + if len(directSignatures) == 0 { + return nil, errors.StructuralError("v6 entity without a valid direct-key signature") + } + // Select main direct key signature. + var mainDirectKeySelfSignature *packet.Signature + for _, directSignature := range directSignatures { + if directSignature.SigType == packet.SigTypeDirectSignature && + directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) && + (mainDirectKeySelfSignature == nil || + directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) { + mainDirectKeySelfSignature = directSignature + } + } + if mainDirectKeySelfSignature == nil { + return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found") + } + // Check that the main self-signature is valid. + err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature) + if err != nil { + return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key") + } + e.SelfSignature = mainDirectKeySelfSignature + e.Signatures = directSignatures + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. 
+ // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig + } + identity.Signatures = append(identity.Signatures, sig) + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Revocations = append(subKey.Revocations, sig) + case packet.SigTypeSubkeyBinding: + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + if e.PrivateKey.Dummy() { + return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") + } + return e.serializePrivate(w, config, true) +} + +// SerializePrivateWithoutSigning serializes an Entity, including private key +// material, but excluding signatures from other entities, to the given Writer. +// Self-signatures of identities and subkeys are not re-signed. This is useful +// when serializing GNU dummy keys, among other things. +// If config is nil, sensible defaults will be used. 
+func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { + return e.serializePrivate(w, config, false) +} + +func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { + if e.PrivateKey == nil { + return goerrors.New("openpgp: private key is missing") + } + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if reSign { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + if subkey.Sig.EmbeddedSignature != nil { + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, + subkey.PrivateKey, config) + if err != nil { + return + } + } + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + if e.PrimaryKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoHMAC || + e.PrimaryKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoAEAD { + return errors.InvalidArgumentError("Can't serialize symmetric primary key") + } + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, directSignature := range e.Signatures { + err := directSignature.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + // The types of keys below are only useful as private keys. Thus, the + // public key packets contain no meaningful information and do not need + // to be serialized. + // Prevent public key export for forwarding keys, see forwarding section 4.1. + if subkey.PublicKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoHMAC || + subkey.PublicKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoAEAD || + subkey.Sig.FlagForward { + continue + } + + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. 
The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + certificationKey, ok := signer.CertificationKey(config.Now()) + if !ok { + return errors.InvalidArgumentError("no valid certification key found") + } + + if certificationKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config) + + signingUserID := config.SigningUserId() + if signingUserID != "" { + if _, ok := signer.Identities[signingUserID]; !ok { + return errors.InvalidArgumentError("signer identity string not found in signer Entity") + } + sig.SignerUserId = &signingUserID + } + + if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the +// specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText + + if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { + return err + } + e.Revocations = append(e.Revocations, revSig) + return nil +} + +// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for +// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { + return errors.InvalidArgumentError("given subkey is not associated with this key") + } + + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText + + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + sk.Revocations = append(sk.Revocations, revSig) + return nil +} + +func (e *Entity) primaryDirectSignature() *packet.Signature { + return e.SelfSignature +} + +// PrimarySelfSignature searches the entity for the self-signature that stores key preferences. +// For V4 keys, returns the self-signature of the primary identity, and the identity. +// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil). +// This self-signature is to be used to check the key expiration, +// algorithm preferences, and so on. 
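The Entity methods collected here (SignIdentity, RevokeKey, RevokeSubkey, and the SerializePrivate variants above) make up the usual key-maintenance flow. The sketch below exercises a few of them against a freshly generated key; it leans on openpgp.NewEntity from elsewhere in this package, treats packet.ReasonForRevocation as an integer type (2 = "key material has been compromised", RFC 4880 section 5.2.3.23), and passes nil configs to get the documented defaults, so read it as an illustration rather than a prescribed usage.

```
package main

import (
	"bytes"
	"log"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Generate a throwaway key; NewEntity leaves the private key unencrypted,
	// so the revocation and re-signing below can use it directly.
	e, err := openpgp.NewEntity("Alice", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Revoke the primary key. Reason code 2 is "key material has been
	// compromised" (RFC 4880, section 5.2.3.23); the signature is appended
	// to e.Revocations.
	if err := e.RevokeKey(packet.ReasonForRevocation(2), "laptop stolen", nil); err != nil {
		log.Fatal(err)
	}

	// Serialize the private key material; identities and subkeys are
	// re-signed, and the revocation generated above is written out too.
	var buf bytes.Buffer
	if err := e.SerializePrivate(&buf, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("serialized %d bytes", buf.Len())
}
```

SerializePrivateWithoutSigning would skip the re-signing step, which is what you want for the GNU dummy keys mentioned in its doc comment.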
+func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) { + if e.PrimaryKey.Version == 6 { + return e.primaryDirectSignature(), nil + } + primaryIdentity := e.PrimaryIdentity() + if primaryIdentity == nil { + return nil, nil + } + return primaryIdentity.SelfSignature, primaryIdentity +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 000000000..108fd096f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,538 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0
c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b9578
4e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
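The armored constants in this test-data file are exactly the kind of input the package's key-ring readers consume. As a small, hedged illustration (it assumes openpgp.ReadArmoredKeyRing and the PublicKey.KeyIdString helper from elsewhere in this package), a block like goodCrossSignatureKey above can be parsed and its identities listed with:

```
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// Read one armored public-key block (such as the constants in this file)
	// from stdin and print each entity's primary key ID and user IDs.
	entities, err := openpgp.ReadArmoredKeyRing(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entities {
		for name := range e.Identities {
			fmt.Printf("%s: %s\n", e.PrimaryKey.KeyIdString(), name)
		}
	}
}
```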
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW 
+IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb +YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl +mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC 
+0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC +WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 
+/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic +BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV 
+oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT +-----END PGP PUBLIC KEY BLOCK-----` + +const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP +kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p +oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr +owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a +DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT +y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX 
+FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j +bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab +k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E +K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L +sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV ++RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB +cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1 +0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr +4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68 +VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe +88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/ +sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg +/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru +J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY +VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO +BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ +CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE +89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC +sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+ +ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk +b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF +r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D +xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4 +pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B +OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or +fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8 +JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8 +Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9 +hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL +L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO +apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf +Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM +RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC +JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7 +5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq +8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht +crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W +skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC +92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/ +QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl +J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu +DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w +tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM +AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM +8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH +5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB +cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1 +HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC +C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA= +=u442 +-----END PGP PRIVATE KEY BLOCK-----` + +const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH +X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh 
+bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo +h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA +8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV +AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+ +sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5 +qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf +iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ +SRXn8DmCTfEB +=cELM +-----END PGP PRIVATE KEY BLOCK-----` + +const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52 + +xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV +MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH +gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD +BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv +bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY +akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J +qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b +mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe +c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ +BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx +dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI +ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq +RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA +8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ +YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn +M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU +EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa +HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA +bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ +EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz +YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7 +UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq +OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2 +JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb +PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i +U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO +Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK +1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF +CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA +MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb +huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA +HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB +QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD +l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02 +XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d +QgqsfguR1PqPuJxpXV4bSr6CGAAAAA== +=MSvh +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR +ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u +zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI +CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA +AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B +FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG 
+wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5 +8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT +z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X +joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F +RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev +VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP +=Z8YJ +-----END PGP PRIVATE KEY BLOCK----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go new file mode 100644 index 000000000..fec41a0e7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import "math/bits" + +// CipherSuite contains a combination of Cipher and Mode +type CipherSuite struct { + // The cipher function + Cipher CipherFunction + // The AEAD mode of operation. + Mode AEADMode +} + +// AEADConfig collects a number of AEAD parameters along with sensible defaults. +// A nil AEADConfig is valid and results in all default values. +type AEADConfig struct { + // The AEAD mode of operation. + DefaultMode AEADMode + // Amount of octets in each chunk of data + ChunkSize uint64 +} + +// Mode returns the AEAD mode of operation. +func (conf *AEADConfig) Mode() AEADMode { + // If no preference is specified, OCB is used (which is mandatory to implement). + if conf == nil || conf.DefaultMode == 0 { + return AEADModeOCB + } + + mode := conf.DefaultMode + if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM { + panic("AEAD mode unsupported") + } + return mode +} + +// ChunkSizeByte returns the byte indicating the chunk size. The effective +// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6) +// limit to 16 = 4 MiB +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (conf *AEADConfig) ChunkSizeByte() byte { + if conf == nil || conf.ChunkSize == 0 { + return 12 // 1 << (12 + 6) == 262144 bytes + } + + chunkSize := conf.ChunkSize + exponent := bits.Len64(chunkSize) - 1 + switch { + case exponent < 6: + exponent = 6 + case exponent > 16: + exponent = 16 + } + + return byte(exponent - 6) +} + +// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the +// maximum returned value is 1 << 30. +func decodeAEADChunkSize(c byte) int { + size := uint64(1 << (c + 6)) + if size != uint64(int(size)) { + return 1 << 30 + } + return int(size) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go new file mode 100644 index 000000000..7171387f9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -0,0 +1,270 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "bytes" + "crypto/cipher" + "encoding/binary" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption. 
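The chunk-size byte in aead_config.go above is just an exponent offset: the effective chunk size is 1 << (c + 6), and ChunkSizeByte clamps the exponent before subtracting 6. A quick round trip through the exported API (a sketch; the printed values follow directly from that formula):

```
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Ask for 64 KiB chunks: 65536 == 1<<16, so the exponent is 16 and the
	// encoded byte is 16 - 6 = 10.
	conf := &packet.AEADConfig{ChunkSize: 64 * 1024}
	c := conf.ChunkSizeByte()
	fmt.Println(c)                    // 10
	fmt.Println(uint64(1) << (c + 6)) // 65536, the effective chunk size

	// A nil config falls back to byte 12, i.e. 1<<18 == 262144-byte chunks.
	var def *packet.AEADConfig
	fmt.Println(def.ChunkSizeByte()) // 12
}
```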
+type aeadCrypter struct { + aead cipher.AEAD + chunkSize int + initialNonce []byte + associatedData []byte // Chunk-independent associated data + chunkIndex []byte // Chunk counter + packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet + bytesProcessed int // Amount of plaintext bytes encrypted/decrypted + buffer bytes.Buffer // Buffered bytes across chunks +} + +// computeNonce takes the incremental index and computes an eXclusive OR with +// the least significant 8 bytes of the receivers' initial nonce (see sec. +// 5.16.1 and 5.16.2). It returns the resulting nonce. +func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { + if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { + return append(wo.initialNonce, wo.chunkIndex...) + } + + nonce = make([]byte, len(wo.initialNonce)) + copy(nonce, wo.initialNonce) + offset := len(wo.initialNonce) - 8 + for i := 0; i < 8; i++ { + nonce[i+offset] ^= wo.chunkIndex[i] + } + return +} + +// incrementIndex performs an integer increment by 1 of the integer represented by the +// slice, modifying it accordingly. +func (wo *aeadCrypter) incrementIndex() error { + index := wo.chunkIndex + if len(index) == 0 { + return errors.AEADError("Index has length 0") + } + for i := len(index) - 1; i >= 0; i-- { + if index[i] < 255 { + index[i]++ + return nil + } + index[i] = 0 + } + return errors.AEADError("cannot further increment index") +} + +// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when +// necessary, similar to aeadEncrypter. +type aeadDecrypter struct { + aeadCrypter // Embedded ciphertext opener + reader io.Reader // 'reader' is a partialLengthReader + peekedBytes []byte // Used to detect last chunk + eof bool +} + +// Read decrypts bytes and reads them into dst. It decrypts when necessary and +// buffers extra decrypted bytes. It returns the number of bytes copied into dst +// and an error. +func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { + // Return buffered plaintext bytes from previous calls + if ar.buffer.Len() > 0 { + return ar.buffer.Read(dst) + } + + // Return EOF if we've previously validated the final tag + if ar.eof { + return 0, io.EOF + } + + // Read a chunk + tagLen := ar.aead.Overhead() + cipherChunkBuf := new(bytes.Buffer) + _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen)) + cipherChunk := cipherChunkBuf.Bytes() + if errRead != nil && errRead != io.EOF { + return 0, errRead + } + + if len(cipherChunk) > 0 { + decrypted, errChunk := ar.openChunk(cipherChunk) + if errChunk != nil { + return 0, errChunk + } + + // Return decrypted bytes, buffering if necessary + if len(dst) < len(decrypted) { + n = copy(dst, decrypted[:len(dst)]) + ar.buffer.Write(decrypted[len(dst):]) + } else { + n = copy(dst, decrypted) + } + } + + // Check final authentication tag + if errRead == io.EOF { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return n, errChunk + } + ar.eof = true // Mark EOF for when we've returned all buffered data + } + return +} + +// Close is noOp. The final authentication tag of the stream was already +// checked in the last Read call. In the future, this function could be used to +// wipe the reader and peeked, decrypted bytes, if necessary. 
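computeNextNonce above derives a fresh nonce per chunk for the AEAD Encrypted Data packet by XORing the big-endian 8-byte chunk counter into the trailing 8 bytes of the initial nonce (the SEIPD path instead appends the counter). Since aeadCrypter is unexported, here is a standalone sketch of that derivation with an arbitrary 16-byte nonce; it illustrates the arithmetic, not the library's API:

```
package main

import "fmt"

// chunkNonce mirrors the XOR step in aeadCrypter.computeNextNonce: the 8-byte
// big-endian chunk counter is folded into the last 8 bytes of the initial nonce.
func chunkNonce(initial []byte, index uint64) []byte {
	nonce := make([]byte, len(initial))
	copy(nonce, initial)
	off := len(initial) - 8
	for i := 0; i < 8; i++ {
		nonce[off+i] ^= byte(index >> (8 * (7 - i)))
	}
	return nonce
}

func main() {
	iv := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	fmt.Printf("% x\n", chunkNonce(iv, 0)) // chunk 0: identical to the IV
	fmt.Printf("% x\n", chunkNonce(iv, 1)) // chunk 1: last byte becomes 0x0f ^ 0x01 = 0x0e
}
```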
+func (ar *aeadDecrypter) Close() (err error) { + if !ar.eof { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return errChunk + } + } + return nil +} + +// openChunk decrypts and checks integrity of an encrypted chunk, returning +// the underlying plaintext and an error. It accesses peeked bytes from next +// chunk, to identify the last chunk and decrypt/validate accordingly. +func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { + tagLen := ar.aead.Overhead() + // Restore carried bytes from last call + chunkExtra := append(ar.peekedBytes, data...) + // 'chunk' contains encrypted bytes, followed by an authentication tag. + chunk := chunkExtra[:len(chunkExtra)-tagLen] + ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(ar.associatedData, ar.chunkIndex...) + } + + nonce := ar.computeNextNonce() + plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + if err != nil { + return nil, err + } + ar.bytesProcessed += len(plainChunk) + if err = ar.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return plainChunk, nil +} + +// Checks the summary tag. It takes into account the total decrypted bytes into +// the associated data. It returns an error, or nil if the tag is valid. +func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { + // Associated: tag, version, cipher, aead, chunk size, ... + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(ar.associatedData, ar.chunkIndex...) + } + + // ... and total number of encrypted octets + adata = append(adata, amountBytes...) + nonce := ar.computeNextNonce() + _, err := ar.aead.Open(nil, nonce, tag, adata) + return err +} + +// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according +// to the AEAD block size, and buffers the extra encrypted bytes for next write. +type aeadEncrypter struct { + aeadCrypter // Embedded plaintext sealer + writer io.WriteCloser // 'writer' is a partialLengthWriter +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. 
+ if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size... + adata := aw.associatedData + + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(aw.associatedData, aw.chunkIndex...) + } + + // ... and total number of encrypted octets + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed)) + adata = append(adata, amountBytes...) + + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. +func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { + if len(data) > aw.chunkSize { + return nil, errors.AEADError("chunk exceeds maximum length") + } + if aw.associatedData == nil { + return nil, errors.AEADError("can't seal without headers") + } + adata := aw.associatedData + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(aw.associatedData, aw.chunkIndex...) + } + + nonce := aw.computeNextNonce() + encrypted := aw.aead.Seal(nil, nonce, data, adata) + aw.bytesProcessed += len(data) + if err := aw.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return encrypted, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go new file mode 100644 index 000000000..98bd876bf --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// AEADEncrypted represents an AEAD Encrypted Packet. 
+// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t +type AEADEncrypted struct { + cipher CipherFunction + mode AEADMode + chunkSizeByte byte + Contents io.Reader // Encrypted chunks and tags + initialNonce []byte // Referred to as IV in RFC4880-bis +} + +// Only currently defined version +const aeadEncryptedVersion = 1 + +func (ae *AEADEncrypted) parse(buf io.Reader) error { + headerData := make([]byte, 4) + if n, err := io.ReadFull(buf, headerData); n < 4 { + return errors.AEADError("could not read aead header:" + err.Error()) + } + // Read initial nonce + mode := AEADMode(headerData[2]) + nonceLen := mode.IvLength() + + // This packet supports only EAX and OCB + // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t + if nonceLen == 0 || mode > AEADModeOCB { + return errors.AEADError("unknown mode") + } + + initialNonce := make([]byte, nonceLen) + if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { + return errors.AEADError("could not read aead nonce:" + err.Error()) + } + ae.Contents = buf + ae.initialNonce = initialNonce + c := headerData[1] + if _, ok := algorithm.CipherById[c]; !ok { + return errors.UnsupportedError("unknown cipher: " + string(c)) + } + ae.cipher = CipherFunction(c) + ae.mode = mode + ae.chunkSizeByte = headerData[3] + return nil +} + +// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or +// an error. +func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { + return ae.decrypt(key) +} + +// decrypt prepares an aeadCrypter and returns a ReadCloser from which +// decrypted bytes can be read (see aeadDecrypter.Read()). +func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { + blockCipher := ae.cipher.new(key) + aead := ae.mode.new(blockCipher) + // Carry the first tagLen bytes + tagLen := ae.mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(ae.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) + } + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: chunkSize, + initialNonce: ae.initialNonce, + associatedData: ae.associatedData(), + chunkIndex: make([]byte, 8), + packetTag: packetTypeAEADEncrypted, + }, + reader: ae.Contents, + peekedBytes: peekedBytes}, nil +} + +// associatedData for chunks: tag, version, cipher, mode, chunk size byte +func (ae *AEADEncrypted) associatedData() []byte { + return []byte{ + 0xD4, + aeadEncryptedVersion, + byte(ae.cipher), + byte(ae.mode), + ae.chunkSizeByte} +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go similarity index 72% rename from vendor/golang.org/x/crypto/openpgp/packet/compressed.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go index 353f94524..0bcb38cac 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -8,9 +8,10 @@ import ( "compress/bzip2" "compress/flate" "compress/zlib" - "golang.org/x/crypto/openpgp/errors" "io" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // Compressed represents a compressed OpenPGP packet. 
The decompressed contents @@ -39,6 +40,37 @@ type CompressionConfig struct { Level int } +// decompressionReader ensures that the whole compression packet is read. +type decompressionReader struct { + compressed io.Reader + decompressed io.ReadCloser + readAll bool +} + +func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader { + return &decompressionReader{ + compressed: r, + decompressed: decompressor, + } +} + +func (dr *decompressionReader) Read(data []byte) (n int, err error) { + if dr.readAll { + return 0, io.EOF + } + n, err = dr.decompressed.Read(data) + if err == io.EOF { + dr.readAll = true + // Close the decompressor. + if errDec := dr.decompressed.Close(); errDec != nil { + return n, errDec + } + // Consume all remaining data from the compressed packet. + consumeAll(dr.compressed) + } + return n, err +} + func (c *Compressed) parse(r io.Reader) error { var buf [1]byte _, err := readFull(r, buf[:]) @@ -47,12 +79,18 @@ func (c *Compressed) parse(r io.Reader) error { } switch buf[0] { + case 0: + c.Body = r case 1: - c.Body = flate.NewReader(r) + c.Body = newDecompressionReader(r, flate.NewReader(r)) case 2: - c.Body, err = zlib.NewReader(r) + decompressor, err := zlib.NewReader(r) + if err != nil { + return err + } + c.Body = newDecompressionReader(r, decompressor) case 3: - c.Body = bzip2.NewReader(r) + c.Body = newDecompressionReader(r, io.NopCloser(bzip2.NewReader(r))) default: err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) } @@ -60,7 +98,7 @@ func (c *Compressed) parse(r io.Reader) error { return err } -// compressedWriteCloser represents the serialized compression stream +// compressedWriterCloser represents the serialized compression stream // header and the compressor. Its Close() method ensures that both the // compressor and serialized stream header are closed. Its Write() // method writes to the compressor. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go new file mode 100644 index 000000000..fb21e6d1b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -0,0 +1,398 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "math/big" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +var ( + defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{ + PubKeyAlgoElGamal: true, + PubKeyAlgoDSA: true, + } + defaultRejectHashAlgorithms = map[crypto.Hash]bool{ + crypto.MD5: true, + crypto.RIPEMD160: true, + } + defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{ + crypto.SHA1: true, + crypto.MD5: true, + crypto.RIPEMD160: true, + } + defaultRejectCurves = map[Curve]bool{ + CurveSecP256k1: true, + } +) + +// A global feature flag to indicate v5 support. +// Can be set via a build tag, e.g.: `go build -tags v5 ./...` +// If the build tag is missing config_v5.go will set it to true. +// +// Disables parsing of v5 keys and v5 signatures. +// These are non-standard entities, which in the crypto-refresh have been superseded +// by v6 keys, v6 signatures and SEIPDv2 encrypted data, respectively. +var V5Disabled = false + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. 
+type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2K (String to Key) config, used for key derivation in the context of secret key encryption + // and password-encrypted data. + // If nil, the default configuration is used + S2KConfig *s2k.Config + // Iteration count for Iterated S2K (String to Key). + // Only used if sk2.Mode is nil. + // This value is duplicated here from s2k.Config for backwards compatibility. + // It determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 65536 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 16777216 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + // + // Deprecated: SK2Count should be configured in S2KConfig instead. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int + // The public key algorithm to use - will always create a signing primary + // key and encryption subkey. + Algorithm PublicKeyAlgorithm + // Some known primes that are optionally prepopulated by the caller + RSAPrimes []*big.Int + // Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA, + // PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used. + Curve Curve + // AEADConfig configures the use of the new AEAD Encrypted Data Packet, + // defined in the draft of the next version of the OpenPGP specification. + // If a non-nil AEADConfig is passed, usage of this packet is enabled. By + // default, it is disabled. See the documentation of AEADConfig for more + // configuration options related to AEAD. + // **Note: using this option may break compatibility with other OpenPGP + // implementations, as well as future versions of this library.** + AEADConfig *AEADConfig + // V6Keys configures version 6 key generation. If false, this package still + // supports version 6 keys, but produces version 4 keys. + V6Keys bool + // Minimum RSA key size allowed for key generation and message signing, verification and encryption. + MinRSABits uint16 + // Reject insecure algorithms, only works with v2 api + RejectPublicKeyAlgorithms map[PublicKeyAlgorithm]bool + RejectHashAlgorithms map[crypto.Hash]bool + RejectMessageHashAlgorithms map[crypto.Hash]bool + RejectCurves map[Curve]bool + // "The validity period of the key. This is the number of seconds after + // the key creation time that the key expires. If this is not present + // or has a value of zero, the key never expires. 
This is found only on + // a self-signature."" + // https://tools.ietf.org/html/rfc4880#section-5.2.3.6 + KeyLifetimeSecs uint32 + // "The validity period of the signature. This is the number of seconds + // after the signature creation time that the signature expires. If + // this is not present or has a value of zero, it never expires." + // https://tools.ietf.org/html/rfc4880#section-5.2.3.10 + SigLifetimeSecs uint32 + // SigningKeyId is used to specify the signing key to use (by Key ID). + // By default, the signing key is selected automatically, preferring + // signing subkeys if available. + SigningKeyId uint64 + // SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28) + // when producing a generic certification signature onto an existing user ID. + // The identity must be present in the signer Entity. + SigningIdentity string + // InsecureAllowUnauthenticatedMessages controls, whether it is tolerated to read + // encrypted messages without Modification Detection Code (MDC). + // MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented + // in most OpenPGP implementations. Messages without MDC are considered unnecessarily + // insecure and should be prevented whenever possible. + // In case one needs to deal with messages from very old OpenPGP implementations, there + // might be no other way than to tolerate the missing MDC. Setting this flag, allows this + // mode of operation. It should be considered a measure of last resort. + InsecureAllowUnauthenticatedMessages bool + // KnownNotations is a map of Notation Data names to bools, which controls + // the notation names that are allowed to be present in critical Notation Data + // signature subpackets. + KnownNotations map[string]bool + // SignatureNotations is a list of Notations to be added to any signatures. + SignatureNotations []*Notation + // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature + // should be enabled for encryption and decryption. + // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr). + // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption + // checks whether the key it was encrypted to is one of the included fingerprints in the signature. + // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked. + // The default behavior, when the config or flag is nil, is to enable the feature. + CheckIntendedRecipients *bool + // CacheSessionKey controls if decryption should return the session key used for decryption. + // If the flag is set, the session key is cached in the message details struct. + CacheSessionKey bool + // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check + // that the packet sequence conforms with the grammar mandated by rfc4880. + // The default behavior, when the config or flag is nil, is to check the packet sequence. + CheckPacketSequence *bool + // NonDeterministicSignaturesViaNotation is a flag to enable randomization of signatures. + // If true, a salt notation is used to randomize signatures generated by v4 and v5 keys + // (v6 signatures are always non-deterministic, by design). + // This protects EdDSA signatures from potentially leaking the secret key in case of faults (i.e. bitflips) which, in principle, could occur + // during the signing computation. 
It is added to signatures of any algo for simplicity, and as it may also serve as protection in case of + // weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks. + // The default behavior, when the config or flag is nil, is to enable the feature. + NonDeterministicSignaturesViaNotation *bool +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now().Truncate(time.Second) + } + return c.Time().Truncate(time.Second) +} + +// KeyLifetime returns the validity period of the key. +func (c *Config) KeyLifetime() uint32 { + if c == nil { + return 0 + } + return c.KeyLifetimeSecs +} + +// SigLifetime returns the validity period of the signature. +func (c *Config) SigLifetime() uint32 { + if c == nil { + return 0 + } + return c.SigLifetimeSecs +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) RSAModulusBits() int { + if c == nil || c.RSABits == 0 { + return 2048 + } + return c.RSABits +} + +func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { + if c == nil || c.Algorithm == 0 { + return PubKeyAlgoRSA + } + return c.Algorithm +} + +func (c *Config) CurveName() Curve { + if c == nil || c.Curve == "" { + return Curve25519 + } + return c.Curve +} + +// Deprecated: The hash iterations should now be queried via the S2K() method. 
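The getters above all follow the same nil-receiver pattern, which is what makes "a nil *Config is valid" work in practice. A minimal sketch (not part of the vendored patch) of how a caller sees those defaults, using only methods defined above:

```go
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // nil is valid: every getter falls back to its default

	fmt.Println(cfg.Hash())           // crypto.SHA256
	fmt.Println(cfg.Cipher())         // AES-128
	fmt.Println(cfg.Compression())    // no compression
	fmt.Println(cfg.RSAModulusBits()) // 2048
	fmt.Println(cfg.CurveName())      // Curve25519
	fmt.Println(cfg.Now())            // time.Now(), truncated to seconds

	// Overriding one field leaves everything else at its default.
	cfg = &packet.Config{DefaultCipher: packet.CipherAES256}
	fmt.Println(cfg.Cipher()) // AES-256
}
```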
+func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} + +func (c *Config) S2K() *s2k.Config { + if c == nil { + return nil + } + // for backwards compatibility + if c.S2KCount > 0 && c.S2KConfig == nil { + return &s2k.Config{ + S2KCount: c.S2KCount, + } + } + return c.S2KConfig +} + +func (c *Config) AEAD() *AEADConfig { + if c == nil { + return nil + } + return c.AEADConfig +} + +func (c *Config) SigningKey() uint64 { + if c == nil { + return 0 + } + return c.SigningKeyId +} + +func (c *Config) SigningUserId() string { + if c == nil { + return "" + } + return c.SigningIdentity +} + +func (c *Config) AllowUnauthenticatedMessages() bool { + if c == nil { + return false + } + return c.InsecureAllowUnauthenticatedMessages +} + +func (c *Config) KnownNotation(notationName string) bool { + if c == nil { + return false + } + return c.KnownNotations[notationName] +} + +func (c *Config) Notations() []*Notation { + if c == nil { + return nil + } + return c.SignatureNotations +} + +func (c *Config) V6() bool { + if c == nil { + return false + } + return c.V6Keys +} + +func (c *Config) IntendedRecipients() bool { + if c == nil || c.CheckIntendedRecipients == nil { + return true + } + return *c.CheckIntendedRecipients +} + +func (c *Config) RetrieveSessionKey() bool { + if c == nil { + return false + } + return c.CacheSessionKey +} + +func (c *Config) MinimumRSABits() uint16 { + if c == nil || c.MinRSABits == 0 { + return 2047 + } + return c.MinRSABits +} + +func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool { + var rejectedAlgorithms map[PublicKeyAlgorithm]bool + if c == nil || c.RejectPublicKeyAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectPublicKeyAlgorithms + } else { + rejectedAlgorithms = c.RejectPublicKeyAlgorithms + } + return rejectedAlgorithms[alg] +} + +func (c *Config) RejectHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectHashAlgorithms + } else { + rejectedAlgorithms = c.RejectHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool { + var rejectedAlgorithms map[crypto.Hash]bool + if c == nil || c.RejectMessageHashAlgorithms == nil { + // Default + rejectedAlgorithms = defaultRejectMessageHashAlgorithms + } else { + rejectedAlgorithms = c.RejectMessageHashAlgorithms + } + return rejectedAlgorithms[hash] +} + +func (c *Config) RejectCurve(curve Curve) bool { + var rejectedCurve map[Curve]bool + if c == nil || c.RejectCurves == nil { + // Default + rejectedCurve = defaultRejectCurves + } else { + rejectedCurve = c.RejectCurves + } + return rejectedCurve[curve] +} + +func (c *Config) StrictPacketSequence() bool { + if c == nil || c.CheckPacketSequence == nil { + return true + } + return *c.CheckPacketSequence +} + +func (c *Config) RandomizeSignaturesViaNotation() bool { + if c == nil || c.NonDeterministicSignaturesViaNotation == nil { + return true + } + return *c.NonDeterministicSignaturesViaNotation +} + +// BoolPointer is a helper function to set a boolean pointer in the Config. 
+// e.g., config.CheckPacketSequence = BoolPointer(true) +func BoolPointer(value bool) *bool { + return &value +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go new file mode 100644 index 000000000..f2415906b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config_v5.go @@ -0,0 +1,7 @@ +//go:build !v5 + +package packet + +func init() { + V5Disabled = true +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 000000000..a84cf6b4f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,664 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/rsa" + "encoding/binary" + "encoding/hex" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/symmetric" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" +) + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + Version int + KeyId uint64 + KeyVersion int // v6 + KeyFingerprint []byte // v6 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field + ephemeralPublicX25519 *x25519.PublicKey // used for x25519 + ephemeralPublicX448 *x448.PublicKey // used for x448 + encryptedSession []byte // used for x25519 and x448 + + nonce []byte + aeadMode algorithm.AEADMode +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [8]byte + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.Version = int(buf[0]) + if e.Version != 3 && e.Version != 6 { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + if e.Version == 6 { + //Read a one-octet size of the following two fields. 
+ if _, err = readFull(r, buf[:1]); err != nil { + return + } + // The size may also be zero, and the key version and + // fingerprint omitted for an "anonymous recipient" + if buf[0] != 0 { + // non-anonymous case + _, err = readFull(r, buf[:versionSize]) + if err != nil { + return + } + e.KeyVersion = int(buf[0]) + if e.KeyVersion != 4 && e.KeyVersion != 6 { + return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion)) + } + var fingerprint []byte + if e.KeyVersion == 6 { + fingerprint = make([]byte, fingerprintSizeV6) + } else if e.KeyVersion == 4 { + fingerprint = make([]byte, fingerprintSize) + } + _, err = readFull(r, fingerprint) + if err != nil { + return + } + e.KeyFingerprint = fingerprint + if e.KeyVersion == 6 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize]) + } else if e.KeyVersion == 4 { + e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize]) + } + } + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + e.Algo = PublicKeyAlgorithm(buf[0]) + var cipherFunction byte + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoX25519: + e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + case PubKeyAlgoX448: + e.ephemeralPublicX448, e.encryptedSession, cipherFunction, err = x448.DecodeFields(r, e.Version == 6) + if err != nil { + return + } + case ExperimentalPubKeyAlgoAEAD: + var aeadMode [1]byte + if _, err = readFull(r, aeadMode[:]); err != nil { + return + } + e.aeadMode = algorithm.AEADMode(aeadMode[0]) + nonceLength := e.aeadMode.NonceLength() + e.nonce = make([]byte, nonceLength) + if _, err = readFull(r, e.nonce); err != nil { + return + } + e.encryptedMPI1 = new(encoding.ShortByteString) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + } + if e.Version < 6 { + switch e.Algo { + case PubKeyAlgoX25519, PubKeyAlgoX448: + e.CipherFunc = CipherFunction(cipherFunction) + // Check for validiy is in the Decrypt method + } + } + + _, err = consumeAll(r) + return +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. 
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) + } + if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint)) + } + if e.Algo != priv.PubKeyAlgo { + return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + case PubKeyAlgoECDH: + vsG := e.encryptedMPI1.Bytes() + m := e.encryptedMPI2.Bytes() + oid := priv.PublicKey.oid.EncodedBytes() + fp := priv.PublicKey.Fingerprint[:] + if priv.PublicKey.Version == 5 { + // For v5 the, the fingerprint must be restricted to 20 bytes + fp = fp[:20] + } + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, fp) + case PubKeyAlgoX25519: + b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession) + case PubKeyAlgoX448: + b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession) + case ExperimentalPubKeyAlgoAEAD: + priv := priv.PrivateKey.(*symmetric.AEADPrivateKey) + b, err = priv.Decrypt(e.nonce, e.encryptedMPI1.Bytes(), e.aeadMode) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if err != nil { + return err + } + + var key []byte + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, ExperimentalPubKeyAlgoAEAD: + keyOffset := 0 + if e.Version < 6 { + e.CipherFunc = CipherFunction(b[0]) + keyOffset = 1 + if !e.CipherFunc.IsSupported() { + return errors.UnsupportedError("unsupported encryption function") + } + } + key, err = decodeChecksumKey(b[keyOffset:]) + if err != nil { + return err + } + case PubKeyAlgoX25519, PubKeyAlgoX448: + if e.Version < 6 { + switch e.CipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448") + } + } + key = b[:] + default: + return errors.UnsupportedError("unsupported algorithm for decryption") + } + e.Key = key + return nil +} + +// Serialize writes the encrypted key packet, e, to w. 
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var encodedLength int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + encodedLength = int(e.encryptedMPI1.EncodedLength()) + case PubKeyAlgoElGamal: + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoECDH: + encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoX25519: + encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6) + case PubKeyAlgoX448: + encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength + if e.Version == 6 { + packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */ + if e.KeyVersion == 6 { + packetLen += fingerprintSizeV6 + } else if e.KeyVersion == 4 { + packetLen += fingerprintSize + } + } + + err := serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write([]byte{byte(e.Version)}) + if err != nil { + return err + } + if e.Version == 6 { + _, err = w.Write([]byte{byte(e.KeyVersion)}) + if err != nil { + return err + } + // The key version number may also be zero, + // and the fingerprint omitted + if e.KeyVersion != 0 { + _, err = w.Write(e.KeyFingerprint) + if err != nil { + return err + } + } + } else { + // Write KeyID + err = binary.Write(w, binary.BigEndian, e.KeyId) + if err != nil { + return err + } + } + _, err = w.Write([]byte{byte(e.Algo)}) + if err != nil { + return err + } + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + _, err := w.Write(e.encryptedMPI1.EncodedBytes()) + return err + case PubKeyAlgoElGamal: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoECDH: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoX25519: + err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err + case PubKeyAlgoX448: + err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6) + return err + default: + panic("internal error") + } +} + +// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If aeadSupported is set, PKESK v6 is used else v4. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config) +} + +// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// Offers the hidden flag option to indicated if the PKESK packet should include a wildcard KeyID. +// If aeadSupported is set, PKESK v6 is used else v4. +// If config is nil, sensible defaults will be used. 
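For context on the hidden/anonymous-recipient flag handled below, here is a hedged usage sketch (not part of the vendored patch). It assumes pub is a recipient *packet.PublicKey and sessionKey a freshly generated symmetric key, both obtained elsewhere; only the call itself is taken from this file.

```go
package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// writeAnonymousPKESK writes a PKESK for pub without identifying the recipient:
// hidden=true produces a wildcard key ID (v3) or a zero key version with no
// fingerprint (v6). A nil config keeps the defaults, so a v3 packet is written.
func writeAnonymousPKESK(w io.Writer, pub *packet.PublicKey, sessionKey []byte) error {
	return packet.SerializeEncryptedKeyWithHiddenOption(
		w, pub, packet.CipherAES256, sessionKey, true, nil)
}
```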
+func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error { + var buf [36]byte // max possible header size is v6 + lenHeaderWritten := versionSize + version := 3 + + if aeadSupported { + version = 6 + } + // An implementation MUST NOT generate ElGamal v6 PKESKs. + if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal { + return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed") + } + // In v3 PKESKs, for x25519 and x448, mandate using AES + if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) { + switch cipherFunc { + case CipherAES128, CipherAES192, CipherAES256: + break + default: + return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448") + } + } + + buf[0] = byte(version) + + // If hidden is set, the key should be hidden + // An implementation MAY accept or use a Key ID of all zeros, + // or a key version of zero and no key fingerprint, to hide the intended decryption key. + // See Section 5.1.8. in the open pgp crypto refresh + if version == 6 { + if !hidden { + // A one-octet size of the following two fields. + buf[1] = byte(keyVersionSize + len(pub.Fingerprint)) + // A one octet key version number. + buf[2] = byte(pub.Version) + lenHeaderWritten += keyVersionSize + 1 + // The fingerprint of the public key + copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint) + lenHeaderWritten += len(pub.Fingerprint) + } else { + // The size may also be zero, and the key version + // and fingerprint omitted for an "anonymous recipient" + buf[1] = 0 + lenHeaderWritten += 1 + } + } else { + if !hidden { + binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId) + } + lenHeaderWritten += keyIdSize + } + buf[lenHeaderWritten] = byte(pub.PubKeyAlgo) + lenHeaderWritten += algorithmSize + + var keyBlock []byte + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, ExperimentalPubKeyAlgoAEAD: + lenKeyBlock := len(key) + 2 + if version < 6 { + lenKeyBlock += 1 // cipher type included + } + keyBlock = make([]byte, lenKeyBlock) + keyOffset := 0 + if version < 6 { + keyBlock[0] = byte(cipherFunc) + keyOffset = 1 + } + encodeChecksumKey(keyBlock[keyOffset:], key) + case PubKeyAlgoX25519, PubKeyAlgoX448: + // algorithm is added in plaintext below + keyBlock = key + } + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoX25519: + return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version) + case PubKeyAlgoX448: + return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version) + case ExperimentalPubKeyAlgoAEAD: + return serializeEncryptedKeyAEAD(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*symmetric.AEADPublicKey), keyBlock, config.AEAD()) + case PubKeyAlgoDSA, 
PubKeyAlgoRSASignOnly, ExperimentalPubKeyAlgoHMAC: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// PKESKv6 is used if config.AEAD() is not nil. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config) +} + +// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains +// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil. +// The hidden option controls if the packet should be anonymous, i.e., omit key metadata. +// If config is nil, sensible defaults will be used. +func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error { + return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config) +} + +func (e *EncryptedKey) ProxyTransform(instance ForwardingInstance) (transformed *EncryptedKey, err error) { + if e.Algo != PubKeyAlgoECDH { + return nil, errors.InvalidArgumentError("invalid PKESK") + } + + if e.KeyId != 0 && e.KeyId != instance.GetForwarderKeyId() { + return nil, errors.InvalidArgumentError("invalid key id in PKESK") + } + + ephemeral := e.encryptedMPI1.Bytes() + transformedEphemeral, err := ecdh.ProxyTransform(ephemeral, instance.ProxyParameter) + if err != nil { + return nil, err + } + + wrappedKey := e.encryptedMPI2.Bytes() + copiedWrappedKey := make([]byte, len(wrappedKey)) + copy(copiedWrappedKey, wrappedKey) + + transformed = &EncryptedKey{ + Version: e.Version, + KeyId: instance.getForwardeeKeyIdOrZero(e.KeyId), + Algo: e.Algo, + encryptedMPI1: encoding.NewMPI(transformedEphemeral), + encryptedMPI2: encoding.NewOID(copiedWrappedKey), + } + + return transformed, nil +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = 
w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := len(header) /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} + +func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x25519 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, cipherFunc, version == 6) +} + +func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version int) error { + ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("x448 encryption failed: " + err.Error()) + } + + packetLen := len(header) /* header length */ + packetLen += x448.EncodedFieldsLength(ciphertext, version == 6) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6) +} + +func serializeEncryptedKeyAEAD(w io.Writer, rand io.Reader, header []byte, pub *symmetric.AEADPublicKey, keyBlock []byte, config *AEADConfig) error { + mode := algorithm.AEADMode(config.Mode()) + iv, ciphertextRaw, err := pub.Encrypt(rand, keyBlock, mode) + if err != nil { + return errors.InvalidArgumentError("AEAD encryption failed: " + err.Error()) + } + + ciphertextShortByteString := encoding.NewShortByteString(ciphertextRaw) + + buffer := append([]byte{byte(mode)}, iv...) + buffer = append(buffer, ciphertextShortByteString.EncodedBytes()...) 
+ + packetLen := len(header) /* header length */ + packetLen += int(len(buffer)) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + + _, err = w.Write(buffer) + return err +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +func decodeChecksumKey(msg []byte) (key []byte, err error) { + key = msg[:len(msg)-2] + expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1]) + checksum := checksumKeyMaterial(key) + if checksum != expectedChecksum { + err = errors.StructuralError("session key checksum is incorrect") + } + return +} + +func encodeChecksumKey(buffer []byte, key []byte) { + copy(buffer, key) + checksum := checksumKeyMaterial(key) + buffer[len(key)] = byte(checksum >> 8) + buffer[len(key)+1] = byte(checksum) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go new file mode 100644 index 000000000..50b4de441 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go @@ -0,0 +1,36 @@ +package packet + +import "encoding/binary" + +// ForwardingInstance represents a single forwarding instance (mapping IDs to a Proxy Param) +type ForwardingInstance struct { + KeyVersion int + ForwarderFingerprint []byte + ForwardeeFingerprint []byte + ProxyParameter []byte +} + +func (f *ForwardingInstance) GetForwarderKeyId() uint64 { + return computeForwardingKeyId(f.ForwarderFingerprint, f.KeyVersion) +} + +func (f *ForwardingInstance) GetForwardeeKeyId() uint64 { + return computeForwardingKeyId(f.ForwardeeFingerprint, f.KeyVersion) +} + +func (f *ForwardingInstance) getForwardeeKeyIdOrZero(originalKeyId uint64) uint64 { + if originalKeyId == 0 { + return 0 + } + + return f.GetForwardeeKeyId() +} + +func computeForwardingKeyId(fingerprint []byte, version int) uint64 { + switch version { + case 4: + return binary.BigEndian.Uint64(fingerprint[12:20]) + default: + panic("invalid pgp key version") + } +} \ No newline at end of file diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go similarity index 94% rename from vendor/golang.org/x/crypto/openpgp/packet/literal.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go index 1a9ec6e51..8a028c8a1 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -11,6 +11,7 @@ import ( // LiteralData represents an encrypted file. See RFC 4880, section 5.9. type LiteralData struct { + Format uint8 IsBinary bool FileName string Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. @@ -31,7 +32,8 @@ func (l *LiteralData) parse(r io.Reader) (err error) { return } - l.IsBinary = buf[0] == 'b' + l.Format = buf[0] + l.IsBinary = l.Format == 'b' fileNameLen := int(buf[1]) _, err = readFull(r, buf[:fileNameLen]) @@ -56,9 +58,9 @@ func (l *LiteralData) parse(r io.Reader) (err error) { // on completion. The fileName is truncated to 255 bytes. 
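The two-byte checksum that encodeChecksumKey and decodeChecksumKey (encrypted_key.go, above) wrap around the session key is simply the sum of all key octets modulo 65536, appended big-endian after the key. A standalone sketch (not part of the vendored patch):

```go
package main

import "fmt"

func main() {
	key := []byte{0x01, 0x02, 0xff}

	var sum uint16
	for _, b := range key {
		sum += uint16(b) // wraps modulo 65536; 0x0102 for this key
	}

	wire := append(append([]byte(nil), key...), byte(sum>>8), byte(sum))
	fmt.Printf("% x\n", wire) // 01 02 ff 01 02
}
```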
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' + buf[0] = 'b' + if !isBinary { + buf[0] = 'u' } if len(fileName) > 255 { fileName = fileName[:255] diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go new file mode 100644 index 000000000..1ee378ba3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go @@ -0,0 +1,33 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type Marker struct{} + +const markerString = "PGP" + +// parse just checks if the packet contains "PGP". +func (m *Marker) parse(reader io.Reader) error { + var buffer [3]byte + if _, err := io.ReadFull(reader, buffer[:]); err != nil { + return err + } + if string(buffer[:]) != markerString { + return errors.StructuralError("invalid marker packet") + } + return nil +} + +// SerializeMarker writes a marker packet to writer. +func SerializeMarker(writer io.Writer) error { + err := serializeHeader(writer, packetTypeMarker, len(markerString)) + if err != nil { + return err + } + _, err = writer.Write([]byte(markerString)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go new file mode 100644 index 000000000..2c3e3f50b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go @@ -0,0 +1,29 @@ +package packet + +// Notation type represents a Notation Data subpacket +// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16 +type Notation struct { + Name string + Value []byte + IsCritical bool + IsHumanReadable bool +} + +func (notation *Notation) getData() []byte { + nameData := []byte(notation.Name) + nameLen := len(nameData) + valueLen := len(notation.Value) + + data := make([]byte, 8+nameLen+valueLen) + if notation.IsHumanReadable { + data[0] = 0x80 + } + + data[4] = byte(nameLen >> 8) + data[5] = byte(nameLen) + data[6] = byte(valueLen >> 8) + data[7] = byte(valueLen) + copy(data[8:8+nameLen], nameData) + copy(data[8+nameLen:], notation.Value) + return data +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go similarity index 92% rename from vendor/golang.org/x/crypto/openpgp/packet/ocfb.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go index ce2a33a54..4f26d0a00 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go @@ -85,8 +85,7 @@ type ocfbDecrypter struct { // NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's // cipher feedback mode using the given cipher.Block. Prefix must be the first // blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into // prefix. Resync determines if the "resynchronization step" from RFC 4880, // 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. 
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { @@ -112,11 +111,6 @@ func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption prefixCopy[blockSize] ^= x.fre[0] prefixCopy[blockSize+1] ^= x.fre[1] - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - if resync { block.Encrypt(x.fre, prefix[2:]) } else { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 000000000..f393c4063 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,157 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + Version int + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool + Salt []byte // v6 only + KeyFingerprint []byte // v6 only +} + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [8]byte + // Read: version | signature type | hash algorithm | public-key algorithm + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + if buf[0] != 3 && buf[0] != 6 { + return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + ops.Version = int(buf[0]) + + var ok bool + ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + + if ops.Version == 6 { + // Only for v6, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(ops.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + ops.Salt = salt + + // Only for v6 packets, 32 octets of the fingerprint of the signing key. + fingerprint := make([]byte, 32) + _, err = readFull(r, fingerprint) + if err != nil { + return + } + ops.KeyFingerprint = fingerprint + ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8]) + } else { + _, err = readFull(r, buf[:8]) + if err != nil { + return + } + ops.KeyId = binary.BigEndian.Uint64(buf[:8]) + } + + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + ops.IsLast = buf[0] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. 
+func (ops *OnePassSignature) Serialize(w io.Writer) error { + //v3 length 1+1+1+1+8+1 = + packetLength := 13 + if ops.Version == 6 { + // v6 length 1+1+1+1+1+len(salt)+32+1 = + packetLength = 38 + len(ops.Salt) + } + + if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil { + return err + } + + var buf [8]byte + buf[0] = byte(ops.Version) + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + + _, err := w.Write(buf[:4]) + if err != nil { + return err + } + + if ops.Version == 6 { + // write salt for v6 signatures + _, err := w.Write([]byte{uint8(len(ops.Salt))}) + if err != nil { + return err + } + _, err = w.Write(ops.Salt) + if err != nil { + return err + } + + // write fingerprint v6 signatures + _, err = w.Write(ops.KeyFingerprint) + if err != nil { + return err + } + } else { + binary.BigEndian.PutUint64(buf[:8], ops.KeyId) + _, err := w.Write(buf[:8]) + if err != nil { + return err + } + } + + isLast := []byte{byte(0)} + if ops.IsLast { + isLast[0] = 1 + } + + _, err = w.Write(isLast) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go similarity index 90% rename from vendor/golang.org/x/crypto/openpgp/packet/opaque.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go index 398447731..cef7c661d 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -8,7 +8,7 @@ import ( "bytes" "io" - "golang.org/x/crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/errors" ) // OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is @@ -83,8 +83,9 @@ func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { // OpaqueSubpacket represents an unparsed OpenPGP subpacket, // as found in signature and user attribute packets. type OpaqueSubpacket struct { - SubType uint8 - Contents []byte + SubType uint8 + EncodedLength []byte // Store the original encoded length for signature verifications. 
+ Contents []byte } // OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from @@ -108,6 +109,7 @@ func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { // RFC 4880, section 5.2.3.1 var subLen uint32 + var encodedLength []byte if len(contents) < 1 { goto Truncated } @@ -118,6 +120,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:1] subLen = uint32(contents[0]) contents = contents[1:] case contents[0] < 255: @@ -125,6 +128,7 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:2] subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 contents = contents[2:] default: @@ -132,16 +136,19 @@ func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacke if len(contents) < subHeaderLen { goto Truncated } + encodedLength = contents[0:5] subLen = uint32(contents[1])<<24 | uint32(contents[2])<<16 | uint32(contents[3])<<8 | uint32(contents[4]) contents = contents[5:] + } if subLen > uint32(len(contents)) || subLen == 0 { goto Truncated } subPacket.SubType = contents[0] + subPacket.EncodedLength = encodedLength subPacket.Contents = contents[1:subLen] return Truncated: @@ -151,7 +158,9 @@ Truncated: func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) + copy(buf, osp.EncodedLength) + n := len(osp.EncodedLength) + buf[n] = osp.SubType if _, err = w.Write(buf[:n+1]); err != nil { return diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 000000000..dd4ad34c6 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,678 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
+func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeMarker packetType = 10 + packetTypeLiteralData packetType = 11 + packetTypeTrust packetType = 12 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 + packetTypeAEADEncrypted packetType = 20 + packetPadding packetType = 21 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres +// to the packet composition rules in rfc4880, if not throws an error. 
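Read above maps each tag to a concrete packet type and distinguishes critical from non-critical unknown tags (0-39 versus 40-63). A minimal sketch of that behaviour, assuming the vendored module path is importable; the byte literals are hand-built purely for illustration:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/errors"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// New-format header for tag 55 (in the non-critical 40-63 range)
	// followed by a zero-length body.
	data := []byte{0xC0 | 55, 0x00}
	_, err := packet.Read(bytes.NewReader(data))
	if _, ok := err.(errors.UnknownPacketTypeError); ok {
		fmt.Println("tag 55: unknown but non-critical")
	} else {
		fmt.Println("unexpected result:", err)
	}
}
```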
+func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) { + tag, len, contents, err := readHeader(r) + if err != nil { + return + } + switch tag { + case packetTypeEncryptedKey: + msgErr = sequence.Next(ESKSymbol) + p = new(EncryptedKey) + case packetTypeSignature: + msgErr = sequence.Next(SigSymbol) + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + msgErr = sequence.Next(ESKSymbol) + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + msgErr = sequence.Next(OPSSymbol) + p = new(OnePassSignature) + case packetTypeCompressed: + msgErr = sequence.Next(CompSymbol) + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + msgErr = sequence.Next(LDSymbol) + p = new(LiteralData) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + msgErr = sequence.Next(EncSymbol) + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + msgErr = sequence.Next(EncSymbol) + p = new(AEADEncrypted) + case packetPadding: + p = Padding(len) + case packetTypeMarker: + p = new(Marker) + case packetTypeTrust: + // Not implemented, just consume + err = errors.UnknownPacketTypeError(tag) + case packetTypePrivateKey, + packetTypePrivateSubkey, + packetTypePublicKey, + packetTypePublicSubkey, + packetTypeUserId, + packetTypeUserAttribute: + msgErr = sequence.Next(UnknownSymbol) + consumeAll(contents) + default: + // Packet Tags from 0 to 39 are critical. + // Packet Tags from 40 to 63 are non-critical. + if tag < 40 { + err = errors.CriticalUnknownPacketTypeError(tag) + } else { + err = errors.UnknownPacketTypeError(tag) + } + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText SignatureType = 0x01 + SigTypeGenericCert SignatureType = 0x10 + SigTypePersonaCert SignatureType = 0x11 + SigTypeCasualCert SignatureType = 0x12 + SigTypePositiveCert SignatureType = 0x13 + SigTypeSubkeyBinding SignatureType = 0x18 + SigTypePrimaryKeyBinding SignatureType = 0x19 + SigTypeDirectSignature SignatureType = 0x1F + SigTypeKeyRevocation SignatureType = 0x20 + SigTypeSubkeyRevocation SignatureType = 0x28 + SigTypeCertificationRevocation SignatureType = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh + PubKeyAlgoX25519 PublicKeyAlgorithm = 25 + PubKeyAlgoX448 PublicKeyAlgorithm = 26 + PubKeyAlgoEd25519 PublicKeyAlgorithm = 27 + PubKeyAlgoEd448 PublicKeyAlgorithm = 28 + + ExperimentalPubKeyAlgoAEAD PublicKeyAlgorithm = 100 + ExperimentalPubKeyAlgoHMAC PublicKeyAlgorithm = 101 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. 
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448, ExperimentalPubKeyAlgoAEAD: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448, ExperimentalPubKeyAlgoHMAC: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction algorithm.CipherFunction + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + return algorithm.CipherFunction(cipher).KeySize() +} + +// IsSupported returns true if the cipher is supported from the library +func (cipher CipherFunction) IsSupported() bool { + return algorithm.CipherFunction(cipher).KeySize() > 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + return algorithm.CipherFunction(cipher).BlockSize() +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + return algorithm.CipherFunction(cipher).New(key) +} + +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) + +// AEADMode represents the different Authenticated Encryption with Associated +// Data specified for OpenPGP. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 +type AEADMode algorithm.AEADMode + +const ( + AEADModeEAX AEADMode = 1 + AEADModeOCB AEADMode = 2 + AEADModeGCM AEADMode = 3 +) + +func (mode AEADMode) IvLength() int { + return algorithm.AEADMode(mode).NonceLength() +} + +func (mode AEADMode) TagLength() int { + return algorithm.AEADMode(mode).TagLength() +} + +// IsSupported returns true if the aead mode is supported from the library +func (mode AEADMode) IsSupported() bool { + return algorithm.AEADMode(mode).TagLength() > 0 +} + +// new returns a fresh instance of the given mode. +func (mode AEADMode) new(block cipher.Block) cipher.AEAD { + return algorithm.AEADMode(mode).New(block) +} + +// ReasonForRevocation represents a revocation reason code as per RFC4880 +// section 5.2.3.23. 
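The CipherFunction, AEADMode, and PublicKeyAlgorithm helpers above are exported, so callers can query key sizes, nonce and tag lengths, and algorithm capabilities directly. A small usage sketch (import path as vendored here; the printed sizes come from the library, not from this patch):

```
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Symmetric cipher parameters.
	fmt.Println("AES-256 key size: ", packet.CipherAES256.KeySize())
	fmt.Println("AES-256 supported:", packet.CipherAES256.IsSupported())

	// AEAD parameters.
	fmt.Println("OCB nonce length: ", packet.AEADModeOCB.IvLength())
	fmt.Println("OCB tag length:   ", packet.AEADModeOCB.TagLength())

	// Capability checks from CanEncrypt/CanSign above.
	fmt.Println("Ed25519 can sign:  ", packet.PubKeyAlgoEd25519.CanSign())
	fmt.Println("X25519 can encrypt:", packet.PubKeyAlgoX25519.CanEncrypt())
	fmt.Println("X25519 can sign:   ", packet.PubKeyAlgoX25519.CanSign())
}
```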
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 + UserIDNotValid ReasonForRevocation = 32 + Unknown ReasonForRevocation = 200 +) + +func NewReasonForRevocation(value byte) ReasonForRevocation { + if value < 4 || value == 32 { + return ReasonForRevocation(value) + } + return Unknown +} + +// Curve is a mapping to supported ECC curves for key generation. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats +type Curve string + +const ( + Curve25519 Curve = "Curve25519" + Curve448 Curve = "Curve448" + CurveNistP256 Curve = "P256" + CurveNistP384 Curve = "P384" + CurveNistP521 Curve = "P521" + CurveSecP256k1 Curve = "SecP256k1" + CurveBrainpoolP256 Curve = "BrainpoolP256" + CurveBrainpoolP384 Curve = "BrainpoolP384" + CurveBrainpoolP512 Curve = "BrainpoolP512" +) + +// TrustLevel represents a trust level per RFC4880 5.2.3.13 +type TrustLevel uint8 + +// TrustAmount represents a trust amount per RFC4880 5.2.3.13 +type TrustAmount uint8 + +const ( + // versionSize is the length in bytes of the version value. + versionSize = 1 + // algorithmSize is the length in bytes of the key algorithm value. + algorithmSize = 1 + // keyVersionSize is the length in bytes of the key version value + keyVersionSize = 1 + // keyIdSize is the length in bytes of the key identifier value. + keyIdSize = 8 + // timestampSize is the length in bytes of encoded timestamps. + timestampSize = 4 + // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6. + fingerprintSizeV6 = 32 + // fingerprintSize is the length in bytes of the key fingerprint. + fingerprintSize = 20 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go new file mode 100644 index 000000000..55a8a56c2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go @@ -0,0 +1,222 @@ +package packet + +// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub) +// to verify pgp packet sequences. 
See Paul's blogpost for more details: +// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/ +import ( + "fmt" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage { + return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol)) +} + +// InputSymbol defines the input alphabet of the PDA +type InputSymbol uint8 + +const ( + LDSymbol InputSymbol = iota + SigSymbol + OPSSymbol + CompSymbol + ESKSymbol + EncSymbol + EOSSymbol + UnknownSymbol +) + +// StackSymbol defines the stack alphabet of the PDA +type StackSymbol int8 + +const ( + MsgStackSymbol StackSymbol = iota + OpsStackSymbol + KeyStackSymbol + EndStackSymbol + EmptyStackSymbol +) + +// State defines the states of the PDA +type State int8 + +const ( + OpenPGPMessage State = iota + ESKMessage + LiteralMessage + CompressedMessage + EncryptedMessage + ValidMessage +) + +// transition represents a state transition in the PDA +type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) + +// SequenceVerifier is a pushdown automata to verify +// PGP messages packet sequences according to rfc4880. +type SequenceVerifier struct { + stack []StackSymbol + state State +} + +// Next performs a state transition with the given input symbol. +// If the transition fails a ErrMalformedMessage is returned. +func (sv *SequenceVerifier) Next(input InputSymbol) error { + for { + stackSymbol := sv.popStack() + transitionFunc := getTransition(sv.state) + nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol) + if err != nil { + return err + } + if redo { + sv.pushStack(stackSymbol) + } + for _, newStackSymbol := range newStackSymbols { + sv.pushStack(newStackSymbol) + } + sv.state = nextState + if !redo { + break + } + } + return nil +} + +// Valid returns true if RDA is in a valid state. +func (sv *SequenceVerifier) Valid() bool { + return sv.state == ValidMessage && len(sv.stack) == 0 +} + +func (sv *SequenceVerifier) AssertValid() error { + if !sv.Valid() { + return errors.ErrMalformedMessage("invalid message") + } + return nil +} + +func NewSequenceVerifier() *SequenceVerifier { + return &SequenceVerifier{ + stack: []StackSymbol{EndStackSymbol, MsgStackSymbol}, + state: OpenPGPMessage, + } +} + +func (sv *SequenceVerifier) popStack() StackSymbol { + if len(sv.stack) == 0 { + return EmptyStackSymbol + } + elemIndex := len(sv.stack) - 1 + stackSymbol := sv.stack[elemIndex] + sv.stack = sv.stack[:elemIndex] + return stackSymbol +} + +func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) { + sv.stack = append(sv.stack, stackSymbol) +} + +func getTransition(from State) transition { + switch from { + case OpenPGPMessage: + return fromOpenPGPMessage + case LiteralMessage: + return fromLiteralMessage + case CompressedMessage: + return fromCompressedMessage + case EncryptedMessage: + return fromEncryptedMessage + case ESKMessage: + return fromESKMessage + case ValidMessage: + return fromValidMessage + } + return nil +} + +// fromOpenPGPMessage is the transition for the state OpenPGPMessage. 
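NewSequenceVerifier starts the automaton in the OpenPGPMessage state with the end and message symbols on the stack; feeding it the grammar symbols of a one-pass-signed message drives it into the accepting state. A minimal sketch using the exported API (assuming the vendored module path):

```
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// One-pass signature, literal data, trailing signature, end of stream.
	sv := packet.NewSequenceVerifier()
	for _, symbol := range []packet.InputSymbol{
		packet.OPSSymbol,
		packet.LDSymbol,
		packet.SigSymbol,
		packet.EOSSymbol,
	} {
		if err := sv.Next(symbol); err != nil {
			fmt.Println("sequence rejected:", err)
			return
		}
	}
	fmt.Println("sequence accepted:", sv.AssertValid() == nil)
}
```

Feeding an out-of-order symbol (for example a second LDSymbol after the literal data) makes Next return an ErrMalformedMessage instead.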
+func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != MsgStackSymbol { + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) + } + switch input { + case LDSymbol: + return LiteralMessage, nil, false, nil + case SigSymbol: + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil + case OPSSymbol: + return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil + case CompSymbol: + return CompressedMessage, nil, false, nil + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol) +} + +// fromESKMessage is the transition for the state ESKMessage. +func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + if stackSymbol != KeyStackSymbol { + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) + } + switch input { + case ESKSymbol: + return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil + case EncSymbol: + return EncryptedMessage, nil, false, nil + } + return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state LiteralMessage. +func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return LiteralMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol) +} + +// fromLiteralMessage is the transition for the state CompressedMessage. +func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return CompressedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromEncryptedMessage is the transition for the state EncryptedMessage. +func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + switch input { + case SigSymbol: + if stackSymbol == OpsStackSymbol { + return EncryptedMessage, nil, false, nil + } + case EOSSymbol: + if stackSymbol == EndStackSymbol { + return ValidMessage, nil, false, nil + } + } + return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil +} + +// fromValidMessage is the transition for the state ValidMessage. +func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) { + return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go new file mode 100644 index 000000000..2d714723c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go @@ -0,0 +1,24 @@ +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// UnsupportedPackage represents a OpenPGP packet with a known packet type +// but with unsupported content. 
+type UnsupportedPacket struct { + IncompletePacket Packet + Error errors.UnsupportedError +} + +// Implements the Packet interface +func (up *UnsupportedPacket) parse(read io.Reader) error { + err := up.IncompletePacket.parse(read) + if castedErr, ok := err.(errors.UnsupportedError); ok { + up.Error = castedErr + return nil + } + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go new file mode 100644 index 000000000..3b6a7045d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go @@ -0,0 +1,26 @@ +package packet + +import ( + "io" +) + +// Padding type represents a Padding Packet (Tag 21). +// The padding type is represented by the length of its padding. +// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21 +type Padding int + +// parse just ignores the padding content. +func (pad Padding) parse(reader io.Reader) error { + _, err := io.CopyN(io.Discard, reader, int64(pad)) + return err +} + +// SerializePadding writes the padding to writer. +func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error { + err := serializeHeader(writer, packetPadding, int(pad)) + if err != nil { + return err + } + _, err = io.CopyN(writer, rand, int64(pad)) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 000000000..406c56e65 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,1282 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "fmt" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "github.com/ProtonMail/go-crypto/openpgp/symmetric" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" + "golang.org/x/crypto/hkdf" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + aead AEADMode // only relevant if S2KAEAD is enabled + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decryptor RSA only). 
+ PrivateKey interface{} + iv []byte + + // Type of encryption of the S2K packet + // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or + // 255 (2-byte checksum) + s2kType S2KType + // Full parameters of the S2K packet + s2kParams *s2k.Params +} + +// S2KType s2k packet type +type S2KType uint8 + +const ( + // S2KNON unencrypt + S2KNON S2KType = 0 + // S2KAEAD use authenticated encryption + S2KAEAD S2KType = 253 + // S2KSHA1 sha1 sum check + S2KSHA1 S2KType = 254 + // S2KCHECKSUM sum check + S2KCHECKSUM S2KType = 255 +) + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA, ECDSA or EdDSA. +func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. 
+ switch pubkey := signer.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey) + case *ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case ecdsa.PrivateKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey) + case *eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case eddsa.PrivateKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey) + case *ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case ed25519.PrivateKey: + pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey) + case *ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + case ed448.PrivateKey: + pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey) + case *symmetric.HMACPrivateKey: + pk.PublicKey = *NewHMACPublicKey(creationTime, &pubkey.PublicKey) + default: + panic("openpgp: unknown signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey. +func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + case *x25519.PrivateKey: + pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey) + case *x448.PrivateKey: + pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey) + case *symmetric.AEADPrivateKey: + pk.PublicKey = *NewAEADPublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + v6 := pk.PublicKey.Version == 6 + + if V5Disabled && v5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 || (v6 && pk.s2kType != S2KNON) { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM, S2KAEAD: + if (v5 || v6) && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version)) + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + if pk.cipher != 0 && !pk.cipher.IsSupported() { + return errors.UnsupportedError("unsupported cipher function in private key") + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. 
+ if pk.s2kType == S2KAEAD { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.aead = AEADMode(buf[0]) + if !pk.aead.IsSupported() { + return errors.UnsupportedError("unsupported aead mode in private key") + } + } + + // [Optional] Only for a version 6 packet, + // and if string-to-key usage octet was 255, 254, or 253, + // an one-octet count of the following field. + if v6 { + _, err = readFull(r, buf[:]) + if err != nil { + return + } + } + + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + if pk.s2kParams.Mode() == s2k.Argon2S2K && pk.s2kType != S2KAEAD { + return errors.StructuralError("using Argon2 S2K without AEAD is not allowed") + } + if pk.s2kParams.Mode() == s2k.SimpleS2K && pk.Version == 6 { + return errors.StructuralError("using Simple S2K with version 6 keys is not allowed") + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + var ivSize int + // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode, + // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size. + // For all other S2K modes, it's always the block size. + if !v5 && pk.s2kType == S2KAEAD { + ivSize = pk.aead.IvLength() + } else { + ivSize = pk.cipher.blockSize() + } + + if ivSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, ivSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + if v5 && pk.s2kType == S2KAEAD { + pk.iv = pk.iv[:pk.aead.IvLength()] + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = io.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + if len(privateKeyData) < 2 { + return errors.StructuralError("truncated private key data") + } + if pk.Version != 6 { + // checksum + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] + return pk.parsePrivateKey(privateKeyData) + } else { + // No checksum + return pk.parsePrivateKey(privateKeyData) + } + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. 
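For keys stored without S2K protection, the parser above validates the legacy two-octet checksum: the sum of all secret-material octets modulo 65536, appended big-endian (mod64kHash below computes the same sum at serialization time). A tiny standalone sketch of that check; checksum16 is an illustrative name only, not a go-crypto function:

```
package main

import "fmt"

// checksum16 is the simple two-octet checksum: the sum of all octets
// modulo 65536, stored big-endian after the secret key material.
func checksum16(data []byte) uint16 {
	var sum uint16
	for _, b := range data {
		sum += uint16(b)
	}
	return sum
}

func main() {
	secret := []byte{0x01, 0x02, 0xFF}
	sum := checksum16(secret)
	wire := append(append([]byte{}, secret...), byte(sum>>8), byte(sum))

	// Verification recomputes the sum over everything but the trailer.
	body, trailer := wire[:len(wire)-2], wire[len(wire)-2:]
	ok := checksum16(body) == (uint16(trailer[0])<<8 | uint16(trailer[1]))
	fmt.Printf("checksum %#04x, valid=%v\n", sum, ok)
}
```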
+func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a one-octet symmetric encryption algorithm. + if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil { + return + } + // [Optional] If string-to-key usage octet was 253, + // a one-octet AEAD algorithm. + if pk.s2kType == S2KAEAD { + if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil { + return + } + } + + s2kBuffer := bytes.NewBuffer(nil) + if err := pk.s2kParams.Serialize(s2kBuffer); err != nil { + return err + } + // [Optional] Only for a version 6 packet, and if string-to-key + // usage octet was 255, 254, or 253, an one-octet + // count of the following field. + if pk.Version == 6 { + if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil { + return + } + } + // [Optional] If string-to-key usage octet was 255, 254, or 253, + // a string-to-key (S2K) specifier. The length of the string-to-key specifier + // depends on its type + if _, err = io.Copy(optional, s2kBuffer); err != nil { + return + } + + // IV + if pk.Encrypted { + if _, err = optional.Write(pk.iv); err != nil { + return + } + if pk.Version == 5 && pk.s2kType == S2KAEAD { + // Add padding for version 5 + padding := make([]byte, pk.cipher.blockSize()-len(pk.iv)) + if _, err = optional.Write(padding); err != nil { + return + } + } + } + } + if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) { + contents.Write([]byte{uint8(optional.Len())}) + } + + if _, err := io.Copy(contents, optional); err != nil { + return err + } + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + if pk.Version != 6 { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv 
*elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error { + _, err := w.Write(priv.Secret) + return err +} + +func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error { + _, err := w.Write(priv.MarshalByteSecret()) + return err +} + +func serializeAEADPrivateKey(w io.Writer, priv *symmetric.AEADPrivateKey) (err error) { + _, err = w.Write(priv.HashSeed[:]) + if err != nil { + return + } + _, err = w.Write(priv.Key) + return +} + +func serializeHMACPrivateKey(w io.Writer, priv *symmetric.HMACPrivateKey) (err error) { + _, err = w.Write(priv.HashSeed[:]) + if err != nil { + return + } + _, err = w.Write(priv.Key) + return +} + +// decrypt decrypts an encrypted private key using a decryption key. +func (pk *PrivateKey) decrypt(decryptionKey []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + block := pk.cipher.new(decryptionKey) + var data []byte + switch pk.s2kType { + case S2KAEAD: + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err + } + // Decrypt the encrypted key material with aead + data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData) + if err != nil { + return err + } + case S2KSHA1, S2KCHECKSUM: + cfb := cipher.NewCFBDecrypter(block, pk.iv) + data = make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + if pk.s2kType == S2KSHA1 { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + default: + return errors.InvalidArgumentError("invalid s2k type") + } + + err := pk.parsePrivateKey(data) + if _, ok := err.(errors.KeyInvalidError); ok { + return errors.KeyInvalidError("invalid key parameters") + } + if err != nil { + return err + } + + // Mark key as unencrypted + pk.s2kType = S2KNON + pk.s2k = nil + pk.Encrypted = false + pk.encryptedData = nil + return nil +} + +func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error { + if pk.Dummy() { + 
return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize()) + if err != nil { + return err + } + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } + return pk.decrypt(key) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + if pk.s2kType == S2KAEAD { + key = pk.applyHKDF(key) + } + return pk.decrypt(key) +} + +// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase. +// Avoids recomputation of similar s2k key derivations. +func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error { + // Create a cache to avoid recomputation of key derviations for the same passphrase. + s2kCache := &s2k.Cache{} + for _, key := range keys { + if key != nil && !key.Dummy() && key.Encrypted { + err := key.decryptWithCache(passphrase, s2kCache) + if err != nil { + return err + } + } + } + return nil +} + +// encrypt encrypts an unencrypted private key. +func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if pk.Encrypted { + return nil + } + // check if encryptionKey has the correct size + if len(key) != cipherFunction.KeySize() { + return errors.InvalidArgumentError("supplied encryption key has the wrong size") + } + + if params.Mode() == s2k.Argon2S2K && s2kType != S2KAEAD { + return errors.InvalidArgumentError("using Argon2 S2K without AEAD is not allowed") + } + if params.Mode() != s2k.Argon2S2K && params.Mode() != s2k.IteratedSaltedS2K && + params.Mode() != s2k.SaltedS2K { // only allowed for high-entropy passphrases + return errors.InvalidArgumentError("insecure S2K mode") + } + + priv := bytes.NewBuffer(nil) + err := pk.serializePrivateKey(priv) + if err != nil { + return err + } + + pk.cipher = cipherFunction + pk.s2kParams = params + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return err + } + + privateKeyBytes := priv.Bytes() + pk.s2kType = s2kType + block := pk.cipher.new(key) + switch s2kType { + case S2KAEAD: + if pk.aead == 0 { + return errors.StructuralError("aead mode is not set on key") + } + aead := pk.aead.new(block) + additionalData, err := pk.additionalData() + if err != nil { + return err + } + pk.iv = make([]byte, aead.NonceSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + // Decrypt the encrypted key material with aead + pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData) + case S2KSHA1, S2KCHECKSUM: + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = io.ReadFull(rand, pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + if s2kType == S2KSHA1 { + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) + } else { + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...) 
+ } + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + default: + return errors.InvalidArgumentError("invalid s2k type for encryption") + } + + pk.Encrypted = true + pk.PrivateKey = nil + return err +} + +// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config. +func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + key := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(key, passphrase) + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + pk.aead = config.AEAD().Mode() + pk.cipher = config.Cipher() + key = pk.applyHKDF(key) + } + // Encrypt the private key with the derived encryption key. + return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random()) +} + +// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase. +// Only derives one key from the passphrase, which is then used to encrypt each key. +func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + encryptionKey := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(encryptionKey, passphrase) + for _, key := range keys { + if key != nil && !key.Dummy() && !key.Encrypted { + s2kType := S2KSHA1 + if config.AEAD() != nil { + s2kType = S2KAEAD + key.aead = config.AEAD().Mode() + key.cipher = config.Cipher() + derivedKey := key.applyHKDF(encryptionKey) + err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random()) + } else { + err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random()) + } + if err != nil { + return err + } + } + } + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. 
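Decrypt above and Encrypt below are the passphrase-based entry points to the machinery in this file (S2K key derivation, then AEAD or CFB with a SHA-1 trailer). A rough round-trip sketch, assuming the vendored module path and an RSA key generated on the spot:

```
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Wrap a freshly generated RSA key in an OpenPGP private-key packet.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pk := packet.NewRSAPrivateKey(time.Now(), rsaKey)

	// Encrypt the secret material with a passphrase, then decrypt it again.
	passphrase := []byte("correct horse battery staple")
	if err := pk.Encrypt(passphrase); err != nil {
		panic(err)
	}
	fmt.Println("encrypted:", pk.Encrypted)

	if err := pk.Decrypt(passphrase); err != nil {
		panic(err)
	}
	fmt.Println("encrypted after Decrypt:", pk.Encrypted)
}
```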
+func (pk *PrivateKey) Encrypt(passphrase []byte) error { + // Default config of private key encryption + config := &Config{ + S2KConfig: &s2k.Config{ + S2KMode: s2k.IteratedSaltedS2K, + S2KCount: 65536, + Hash: crypto.SHA256, + }, + DefaultCipher: CipherAES256, + } + return pk.EncryptWithConfig(passphrase, config) +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *eddsa.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + case *x25519.PrivateKey: + err = serializeX25519PrivateKey(w, priv) + case *x448.PrivateKey: + err = serializeX448PrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEd25519PrivateKey(w, priv) + case *ed448.PrivateKey: + err = serializeEd448PrivateKey(w, priv) + case *symmetric.AEADPrivateKey: + err = serializeAEADPrivateKey(w, priv) + case *symmetric.HMACPrivateKey: + err = serializeHMACPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + case PubKeyAlgoX25519: + return pk.parseX25519PrivateKey(data) + case PubKeyAlgoX448: + return pk.parseX448PrivateKey(data) + case PubKeyAlgoEd25519: + return pk.parseEd25519PrivateKey(data) + case PubKeyAlgoEd448: + return pk.parseEd448PrivateKey(data) + default: + err = errors.StructuralError("unknown private key type") + return + case ExperimentalPubKeyAlgoAEAD: + return pk.parseAEADPrivateKey(data) + case ExperimentalPubKeyAlgoHMAC: + return pk.parseHMACPrivateKey(data) + } +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = 
new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil { + return err + } + if err := ecdsa.Validate(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := ecdh.NewPrivateKey(*ecdhPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := ecdh.Validate(ecdhPriv); err != nil { + return err + } + + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey) + privateKey := x25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x25519.KeySize) + + if len(data) != x25519.KeySize { + err = errors.StructuralError("wrong x25519 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x25519.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey) + privateKey := x448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + privateKey.Secret = make([]byte, x448.KeySize) + + if len(data) != x448.KeySize { + err = errors.StructuralError("wrong x448 key size") + return err + } + subtle.ConstantTimeCopy(1, privateKey.Secret, data) + if err = x448.Validate(privateKey); err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + privateKey := ed25519.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed25519.SeedSize { + err = errors.StructuralError("wrong ed25519 key size") + return err + } + err = privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed25519.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) { + publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey) + privateKey := ed448.NewPrivateKey(*publicKey) + privateKey.PublicKey = *publicKey + + if len(data) != ed448.SeedSize { + err = errors.StructuralError("wrong ed448 key size") + return err + } + err = 
privateKey.UnmarshalByteSecret(data) + if err != nil { + return err + } + err = ed448.Validate(privateKey) + if err != nil { + return err + } + pk.PrivateKey = privateKey + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) + eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) + eddsaPriv.PublicKey = *eddsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := eddsa.Validate(eddsaPriv); err != nil { + return err + } + + pk.PrivateKey = eddsaPriv + + return nil +} + +func (pk *PrivateKey) additionalData() ([]byte, error) { + additionalData := bytes.NewBuffer(nil) + // Write additional data prefix based on packet type + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + // Write public key to additional data + _, err := additionalData.Write([]byte{packetByte}) + if err != nil { + return nil, err + } + err = pk.PublicKey.serializeWithoutHeaders(additionalData) + if err != nil { + return nil, err + } + return additionalData.Bytes(), nil +} + +func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte { + var packetByte byte + if pk.PublicKey.IsSubkey { + packetByte = 0xc7 + } else { + packetByte = 0xc5 + } + associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)} + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + encryptionKey := make([]byte, pk.cipher.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func (pk *PrivateKey) parseAEADPrivateKey(data []byte) (err error) { + pubKey := pk.PublicKey.PublicKey.(*symmetric.AEADPublicKey) + + aeadPriv := new(symmetric.AEADPrivateKey) + aeadPriv.PublicKey = *pubKey + + copy(aeadPriv.HashSeed[:], data[:32]) + + priv := make([]byte, pubKey.Cipher.KeySize()) + copy(priv, data[32:]) + aeadPriv.Key = priv + aeadPriv.PublicKey.Key = aeadPriv.Key + + if err = validateAEADParameters(aeadPriv); err != nil { + return + } + + pk.PrivateKey = aeadPriv + pk.PublicKey.PublicKey = &aeadPriv.PublicKey + return +} + +func (pk *PrivateKey) parseHMACPrivateKey(data []byte) (err error) { + pubKey := pk.PublicKey.PublicKey.(*symmetric.HMACPublicKey) + + hmacPriv := new(symmetric.HMACPrivateKey) + hmacPriv.PublicKey = *pubKey + + copy(hmacPriv.HashSeed[:], data[:32]) + + priv := make([]byte, pubKey.Hash.Size()) + copy(priv, data[32:]) + hmacPriv.Key = data[32:] + hmacPriv.PublicKey.Key = hmacPriv.Key + + if err = validateHMACParameters(hmacPriv); err != nil { + return + } + + pk.PrivateKey = hmacPriv + pk.PublicKey.PublicKey = &hmacPriv.PublicKey + return +} + +func validateAEADParameters(priv *symmetric.AEADPrivateKey) error { + return validateCommonSymmetric(priv.HashSeed, priv.PublicKey.BindingHash) +} + +func validateHMACParameters(priv *symmetric.HMACPrivateKey) error { + return validateCommonSymmetric(priv.HashSeed, priv.PublicKey.BindingHash) +} + +func validateCommonSymmetric(seed [32]byte, bindingHash [32]byte) error { + expectedBindingHash := symmetric.ComputeBindingHash(seed) + if !bytes.Equal(expectedBindingHash, bindingHash[:]) { + return errors.KeyInvalidError("symmetric: wrong binding hash") + } + return nil +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := 
priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { + return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. + gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 000000000..029b8f1aa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = 
"9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. +const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. 
+// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 000000000..d9e2c85ee --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,1277 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/dsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + goerrors "errors" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/symmetric" + "github.com/ProtonMail/go-crypto/openpgp/x25519" + "github.com/ProtonMail/go-crypto/openpgp/x448" +) + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. 
+func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// UpgradeToV6 updates the version of the key to v6, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV6() error { + pk.Version = 6 + pk.setFingerprintAndKeyId() + return pk.checkV6Compatibility() +} + +// ReplaceKDF replaces the KDF instance, and updates all necessary fields. +func (pk *PublicKey) ReplaceKDF(kdf ecdh.KDF) error { + ecdhKey, ok := pk.PublicKey.(*ecdh.PublicKey) + if !ok { + return goerrors.New("wrong forwarding sub key generation") + } + + ecdhKey.KDF = kdf + byteBuffer := new(bytes.Buffer) + err := kdf.Serialize(byteBuffer) + if err != nil { + return err + } + + pk.kdf = encoding.NewOID(byteBuffer.Bytes()[1:]) + pk.setFingerprintAndKeyId() + + return nil +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) error + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + kdf: kdf, + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + + if curveInfo == nil { + panic("unknown elliptic curve") + } + + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey { + curveInfo := ecc.FindByCurve(pub.GetCurve()) + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(pub.MarshalPoint()), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewX448PublicKey(creationTime time.Time, pub *x448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoX448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd25519, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEd448, + PublicKey: pub, + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewAEADPublicKey(creationTime time.Time, pub *symmetric.AEADPublicKey) *PublicKey { + var pk *PublicKey + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: ExperimentalPubKeyAlgoAEAD, + PublicKey: pub, + } + + return pk +} + +func NewHMACPublicKey(creationTime time.Time, pub *symmetric.HMACPublicKey) *PublicKey { + var pk *PublicKey + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: ExperimentalPubKeyAlgoHMAC, + PublicKey: pub, + } + + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + pk.Version = int(buf[0]) + if pk.Version != 4 && pk.Version != 5 && pk.Version != 6 { + return 
errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + if V5Disabled && pk.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if pk.Version >= 5 { + // Read the four-octet scalar octet count + // The count is not used in this implementation + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + // Ignore four-ocet length + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + case PubKeyAlgoX25519: + err = pk.parseX25519(r) + case PubKeyAlgoX448: + err = pk.parseX448(r) + case PubKeyAlgoEd25519: + err = pk.parseEd25519(r) + case PubKeyAlgoEd448: + err = pk.parseEd448(r) + case ExperimentalPubKeyAlgoAEAD: + err = pk.parseAEAD(r) + case ExperimentalPubKeyAlgoHMAC: + err = pk.parseHMAC(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version >= 5 { + fingerprint := sha256.New() + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + if err := pk.SerializeForHash(fingerprint); err != nil { + // Should not happen for a hash. + panic(err) + } + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +func (pk *PublicKey) checkV6Compatibility() error { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. + switch pk.PubKeyAlgo { + case PubKeyAlgoECDH: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + if curveInfo.GenName == ecc.Curve25519GenName { + return errors.StructuralError("cannot generate v6 key with deprecated OID: Curve25519Legacy") + } + case PubKeyAlgoEdDSA: + return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") + } + return nil +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + c, ok := curveInfo.Curve.(ecc.ECDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + ecdsaKey := ecdsa.NewPublicKey(c) + err = ecdsaKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdsaKey + + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDH(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + if pk.Version == 6 && curveInfo.GenName == ecc.Curve25519GenName { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. 
+ return errors.StructuralError("cannot read v6 key with deprecated OID: Curve25519Legacy") + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.kdf = new(encoding.OID) + if _, err = pk.kdf.ReadFrom(r); err != nil { + return + } + + c, ok := curveInfo.Curve.(ecc.ECDHCurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + kdfLen := len(pk.kdf.Bytes()) + if kdfLen < 3 { + return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + kdfVersion := int(pk.kdf.Bytes()[0]) + if kdfVersion != ecdh.KDFVersion1 && kdfVersion != ecdh.KDFVersionForwarding { + return errors.UnsupportedError("unsupported ECDH KDF version: " + strconv.Itoa(kdfVersion)) + } + kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) + } + kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) + } + + kdf := ecdh.KDF{ + Version: kdfVersion, + Hash: kdfHash, + Cipher: kdfCipher, + } + + if kdfVersion == ecdh.KDFVersionForwarding { + if pk.Version != 4 || kdfLen != 23 { + return errors.UnsupportedError("unsupported ECDH KDF v2 length: " + strconv.Itoa(kdfLen)) + } + + kdf.ReplacementFingerprint = pk.kdf.Bytes()[3:23] + } + + ecdhKey := ecdh.NewPublicKey(c, kdf) + err = ecdhKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdhKey + return +} + +func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + if pk.Version == 6 { + // Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs. + return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") + } + + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.EdDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + if len(pk.p.Bytes()) == 0 { + return errors.StructuralError("empty EdDSA public key") + } + + pub := eddsa.NewPublicKey(c) + + switch flag := pk.p.Bytes()[0]; flag { + case 0x04: + // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + case 0x40: + err = pub.UnmarshalPoint(pk.p.Bytes()) + default: + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + } + + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX25519(r io.Reader) (err error) { + point := make([]byte, x25519.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x25519.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseX448(r io.Reader) (err error) { + point := make([]byte, x448.KeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &x448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd25519(r io.Reader) (err error) { + point := make([]byte, ed25519.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed25519.PublicKey{ + Point: 
point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseEd448(r io.Reader) (err error) { + point := make([]byte, ed448.PublicKeySize) + _, err = io.ReadFull(r, point) + if err != nil { + return + } + pub := &ed448.PublicKey{ + Point: point, + } + pk.PublicKey = pub + return +} + +func (pk *PublicKey) parseAEAD(r io.Reader) (err error) { + var cipher [1]byte + _, err = readFull(r, cipher[:]) + if err != nil { + return + } + + var bindingHash [32]byte + _, err = readFull(r, bindingHash[:]) + if err != nil { + return + } + + symmetric := &symmetric.AEADPublicKey{ + Cipher: algorithm.CipherFunction(cipher[0]), + BindingHash: bindingHash, + } + + pk.PublicKey = symmetric + return +} + +func (pk *PublicKey) parseHMAC(r io.Reader) (err error) { + var hash [1]byte + _, err = readFull(r, hash[:]) + if err != nil { + return + } + bindingHash, err := readBindingHash(r) + if err != nil { + return + } + + hmacHash, ok := algorithm.HashById[hash[0]] + if !ok { + return errors.UnsupportedError("unsupported HMAC hash: " + strconv.Itoa(int(hash[0]))) + } + + symmetric := &symmetric.HMACPublicKey{ + Hash: hmacHash, + BindingHash: bindingHash, + } + + pk.PublicKey = symmetric + return +} + +func readBindingHash(r io.Reader) (bindingHash [32]byte, err error) { + _, err = readFull(r, bindingHash[:]) + return +} + +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. +func (pk *PublicKey) SerializeForHash(w io.Writer) error { + if err := pk.SerializeSignaturePrefix(w); err != nil { + return err + } + return pk.serializeWithoutHeaders(w) +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { + var pLength = pk.algorithmSpecificByteCount() + // version, timestamp, algorithm + pLength += versionSize + timestampSize + algorithmSize + if pk.Version >= 5 { + // key octet count (4). + pLength += 4 + _, err := w.Write([]byte{ + // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length + // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts + // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. 
+ 0x95 + byte(pk.Version), + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + return err + } + if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil { + return err + } + return nil +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version >= 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, int(length)) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() uint32 { + length := uint32(0) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += uint32(pk.n.EncodedLength()) + length += uint32(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.q.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.g.EncodedLength()) + length += uint32(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + length += uint32(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += uint32(pk.oid.EncodedLength()) + length += uint32(pk.p.EncodedLength()) + case PubKeyAlgoX25519: + length += x25519.KeySize + case PubKeyAlgoX448: + length += x448.KeySize + case PubKeyAlgoEd25519: + length += ed25519.PublicKeySize + case PubKeyAlgoEd448: + length += ed448.PublicKeySize + case ExperimentalPubKeyAlgoAEAD, ExperimentalPubKeyAlgoHMAC: + length += 1 // Hash octet + length += 32 // Binding hash + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version >= 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoX25519: + publicKey := pk.PublicKey.(*x25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoX448: + publicKey := pk.PublicKey.(*x448.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd25519: + publicKey := pk.PublicKey.(*ed25519.PublicKey) + _, err = w.Write(publicKey.Point) + return + case PubKeyAlgoEd448: + publicKey := pk.PublicKey.(*ed448.PublicKey) + _, err = w.Write(publicKey.Point) + return + case ExperimentalPubKeyAlgoAEAD: + symmKey := pk.PublicKey.(*symmetric.AEADPublicKey) + cipherOctet := [1]byte{symmKey.Cipher.Id()} + if _, err = w.Write(cipherOctet[:]); err != nil { + return + } + _, err = w.Write(symmKey.BindingHash[:]) + return + case ExperimentalPubKeyAlgoHMAC: + symmKey := pk.PublicKey.(*symmetric.HMACPublicKey) + hashOctet := [1]byte{symmKey.Hash.Id()} + if _, err = w.Write(hashOctet[:]); err != nil { + return + } + _, err = w.Write(symmKey.BindingHash[:]) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifyHashTag returns nil iff sig appears to be a plausible signature of the data +// hashed into signed, based solely on its HashTag. signed is mutated by this call. 
+func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + return nil +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + // see discussion https://github.com/ProtonMail/go-crypto/issues/107 + if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey) + if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + case PubKeyAlgoEd25519: + ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey) + if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("Ed25519 verification failure") + } + return nil + case PubKeyAlgoEd448: + ed448PublicKey := pk.PublicKey.(*ed448.PublicKey) + if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) { + return errors.SignatureError("ed448 verification failure") + } + return nil + case ExperimentalPubKeyAlgoHMAC: + HMACKey := pk.PublicKey.(*symmetric.HMACPublicKey) + + result, err := HMACKey.Verify(hashBytes, sig.HMAC.Bytes()) + if err != nil { + return err + } + if !result { + return errors.SignatureError("HMAC verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey 
relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) { + h = hashFunc + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeyHashTag returns nil iff sig appears to be a plausible signature over this +// primary key and subkey, based solely on its HashTag. +func (pk *PublicKey) VerifyKeyHashTag(signed *PublicKey, sig *Signature) error { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + return VerifyHashTag(h, sig) +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify() + if err != nil { + return err + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + // Keys having this flag MUST have the forwarding KDF parameters version 2 defined in Section 5.1. + if sig.FlagForward && (signed.PubKeyAlgo != PubKeyAlgoECDH || + signed.kdf == nil || + signed.kdf.Bytes()[0] != ecdh.KDFVersionForwarding) { + return errors.StructuralError("forwarding key with wrong ecdh kdf version") + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) { + return pk.SerializeForHash(hashFunc) +} + +// VerifyRevocationHashTag returns nil iff sig appears to be a plausible signature +// over this public key, based solely on its HashTag. +func (pk *PublicKey) VerifyRevocationHashTag(sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + if err = keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return VerifyHashTag(preparedHash, sig) +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + if err = keyRevocationHash(pk, preparedHash); err != nil { + return err + } + return pk.VerifySignature(preparedHash, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by this public key, of signed. 
+func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + h, err := keySignatureHash(pk, signed, preparedHash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) { + + // RFC 4880, section 5.2.4 + if err := pk.SerializeSignaturePrefix(h); err != nil { + return err + } + if err := pk.serializeWithoutHeaders(h); err != nil { + return err + } + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return nil +} + +// directKeySignatureHash returns a Hash of the message that needs to be signed. +func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) { + return pk.SerializeForHash(h) +} + +// VerifyUserIdHashTag returns nil iff sig appears to be a plausible signature over this +// public key and UserId, based solely on its HashTag +func (pk *PublicKey) VerifyUserIdHashTag(id string, sig *Signature) (err error) { + preparedHash, err := sig.PrepareVerify() + if err != nil { + return err + } + err = userIdSignatureHash(id, pk, preparedHash) + if err != nil { + return err + } + return VerifyHashTag(preparedHash, sig) +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := userIdSignatureHash(id, pub, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) { + h, err := sig.PrepareVerify() + if err != nil { + return err + } + if err := directKeySignatureHash(pk, h); err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// BitLength returns the bit length for the given public key. 
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.BitLength() + case PubKeyAlgoDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoElGamal: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDH: + bitLength = pk.p.BitLength() + case PubKeyAlgoEdDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoX25519: + bitLength = x25519.KeySize * 8 + case PubKeyAlgoX448: + bitLength = x448.KeySize * 8 + case PubKeyAlgoEd25519: + bitLength = ed25519.PublicKeySize * 8 + case PubKeyAlgoEd448: + bitLength = ed448.PublicKeySize * 8 + case ExperimentalPubKeyAlgoAEAD: + bitLength = 32 + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +// Curve returns the used elliptic curve of this public key. +// Returns an error if no elliptic curve is used. +func (pk *PublicKey) Curve() (curve Curve, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA: + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + curve = Curve(curveInfo.GenName) + case PubKeyAlgoEd25519, PubKeyAlgoX25519: + curve = Curve25519 + case PubKeyAlgoEd448, PubKeyAlgoX448: + curve = Curve448 + default: + err = errors.InvalidArgumentError("public key does not operate with an elliptic curve") + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired or is created in the future. +func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.Unix() > currentTime.Unix() { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.Unix() > expiry.Unix() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 000000000..b255f1f6f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = 
"9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 000000000..dd8409239 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,209 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +type PacketReader interface { + Next() (p Packet, err error) + Push(reader io.Reader) (err error) + Unread(p Packet) +} + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + for { + p, err := r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return p, nil + } + } + return nil, io.EOF +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported +// packets are returned as UnsupportedPacket type. +func (r *Reader) NextWithUnsupported() (p Packet, err error) { + for { + p, err = r.read() + if err == io.EOF { + break + } else if err != nil { + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if casteErr, ok := err.(errors.UnsupportedError); ok { + return &UnsupportedPacket{ + IncompletePacket: p, + Error: casteErr, + }, nil + } + return + } else { + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + return + } + } + return nil, io.EOF +} + +func (r *Reader) read() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + return p, err + } + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. 
+func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} + +// CheckReader is similar to Reader but additionally +// uses the pushdown automata to verify the read packet sequence. +type CheckReader struct { + Reader + verifier *SequenceVerifier + fullyRead bool +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +// If the read packet sequence does not conform to the packet composition +// rules in rfc4880, it returns an error. +func (r *CheckReader) Next() (p Packet, err error) { + if r.fullyRead { + return nil, io.EOF + } + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + var errMsg error + for len(r.readers) > 0 { + p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier) + if errMsg != nil { + err = errMsg + return + } + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + //A marker packet MUST be ignored when received + switch p.(type) { + case *Marker: + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } + if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil { + return nil, errMsg + } + if errMsg = r.verifier.AssertValid(); errMsg != nil { + return nil, errMsg + } + r.fullyRead = true + return nil, io.EOF +} + +func NewCheckReader(r io.Reader) *CheckReader { + return &CheckReader{ + Reader: Reader{ + q: nil, + readers: []io.Reader{r}, + }, + verifier: NewSequenceVerifier(), + fullyRead: false, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go new file mode 100644 index 000000000..fb2e362e4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go @@ -0,0 +1,15 @@ +package packet + +// Recipient type represents a Intended Recipient Fingerprint subpacket +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr +type Recipient struct { + KeyVersion int + Fingerprint []byte +} + +func (r *Recipient) Serialize() []byte { + packet := make([]byte, len(r.Fingerprint)+1) + packet[0] = byte(r.KeyVersion) + copy(packet[1:], r.Fingerprint) + return packet +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 000000000..79161a38d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,1530 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/ed25519" + "github.com/ProtonMail/go-crypto/openpgp/ed448" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const ( + // First octet of key flags. + // See RFC 9580, section 5.2.3.29 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + KeyFlagForward + KeyFlagGroupKey +) + +const ( + // First octet of keyserver preference flags. + // See RFC 9580, section 5.2.3.25 for details. + _ = 1 << iota + _ + _ + _ + _ + _ + _ + KeyserverPrefNoModify +) + +const SaltNotationName = "salt@notations.openpgpjs.org" + +// Signature represents a signature. See RFC 9580, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + // salt contains a random salt value for v6 signatures + // See RFC 9580 Section 5.2.4. + salt []byte + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included into the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + EdSig []byte + HMAC encoding.Field + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredCipherSuites [][2]uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + SignerUserId *string + IsPrimaryId *bool + Notations []*Notation + IntendedRecipients []*Recipient + + // TrustLevel and TrustAmount can be set by the signer to assert that + // the key is not only valid but also trustworthy at the specified + // level. + // See RFC 9580, section 5.2.3.21 for details. + TrustLevel TrustLevel + TrustAmount TrustAmount + + // TrustRegularExpression can be used in conjunction with trust Signature + // packets to limit the scope of the trust that is extended. + // See RFC 9580, section 5.2.3.22 for details. + TrustRegularExpression *string + + // KeyserverPrefsValid is set if any keyserver preferences were given. See RFC 9580, section + // 5.2.3.25 for details. + KeyserverPrefsValid bool + KeyserverPrefNoModify bool + + // PreferredKeyserver can be set to a URI where the latest version of the + // key that this signature is made over can be found. See RFC 9580, section + // 5.2.3.26 for details. + PreferredKeyserver string + + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. See RFC 9580, section + // 5.2.3.28 for details. 
+ PolicyURI string + + // FlagsValid is set if any flags were given. See RFC 9580, section + // 5.2.3.29 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey, FlagForward bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 9580, section 5.2.3.31 for details. + RevocationReason *ReasonForRevocation + RevocationReasonText string + + // In a self-signature, these flags are set there is a features subpacket + // indicating that the issuer implementation supports these features + // see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket + SEIPDv1, SEIPDv2 bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +// VerifiableSignature internally keeps state if the +// the signature has been verified before. +type VerifiableSignature struct { + Valid *bool // nil if it has not been verified yet + Packet *Signature +} + +// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature. +func NewVerifiableSig(signature *Signature) *VerifiableSignature { + return &VerifiableSignature{ + Packet: signature, + } +} + +// Salt returns the signature salt for v6 signatures. +func (sig *Signature) Salt() []byte { + if sig == nil { + return nil + } + return sig.salt +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 9580, section 5.2.3 + var buf [7]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + sig.Version = int(buf[0]) + if sig.Version != 4 && sig.Version != 5 && sig.Version != 6 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + if V5Disabled && sig.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if sig.Version == 6 { + _, err = readFull(r, buf[:7]) + } else { + _, err = readFull(r, buf[:5]) + } + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448, ExperimentalPubKeyAlgoHMAC: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + + if sig.Version < 5 { + sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + } else { + sig.Hash, ok = algorithm.HashIdToHash(buf[2]) + } + + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + var hashedSubpacketsLength int + if sig.Version == 6 { + // For a v6 signature, a four-octet length is used. 
+ hashedSubpacketsLength = + int(buf[3])<<24 | + int(buf[4])<<16 | + int(buf[5])<<8 | + int(buf[6]) + } else { + hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4]) + } + hashedSubpackets := make([]byte, hashedSubpacketsLength) + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + if sig.Version == 6 { + _, err = readFull(r, buf[:4]) + } else { + _, err = readFull(r, buf[:2]) + } + + if err != nil { + return + } + var unhashedSubpacketsLength uint32 + if sig.Version == 6 { + unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3]) + } else { + unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1]) + } + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + if sig.Version == 6 { + // Only for v6 signatures, a variable-length field containing the salt + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + saltLength := int(buf[0]) + var expectedSaltLength int + expectedSaltLength, err = SaltLengthForHash(sig.Hash) + if err != nil { + return + } + if saltLength != expectedSaltLength { + err = errors.StructuralError("unexpected salt size for the given hash algorithm") + return + } + salt := make([]byte, expectedSaltLength) + _, err = readFull(r, salt) + if err != nil { + return + } + sig.salt = salt + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature = new(encoding.MPI) + _, err = sig.RSASignature.ReadFrom(r) + case PubKeyAlgoDSA: + sig.DSASigR = new(encoding.MPI) + if _, err = sig.DSASigR.ReadFrom(r); err != nil { + return + } + + sig.DSASigS = new(encoding.MPI) + _, err = sig.DSASigS.ReadFrom(r) + case PubKeyAlgoECDSA: + sig.ECDSASigR = new(encoding.MPI) + if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { + return + } + + sig.ECDSASigS = new(encoding.MPI) + _, err = sig.ECDSASigS.ReadFrom(r) + case PubKeyAlgoEdDSA: + sig.EdDSASigR = new(encoding.MPI) + if _, err = sig.EdDSASigR.ReadFrom(r); err != nil { + return + } + + sig.EdDSASigS = new(encoding.MPI) + if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoEd25519: + sig.EdSig, err = ed25519.ReadSignature(r) + if err != nil { + return + } + case PubKeyAlgoEd448: + sig.EdSig, err = ed448.ReadSignature(r) + if err != nil { + return + } + case ExperimentalPubKeyAlgoHMAC: + sig.HMAC = new(encoding.ShortByteString) + if _, err = sig.HMAC.ReadFrom(r); err != nil { + return + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 9580, section 5.2.3.1. 
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + exportableCertSubpacket signatureSubpacketType = 4 + trustSubpacket signatureSubpacketType = 5 + regularExpressionSubpacket signatureSubpacketType = 6 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + notationDataSubpacket signatureSubpacketType = 20 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + keyserverPrefsSubpacket signatureSubpacketType = 23 + prefKeyserverSubpacket signatureSubpacketType = 24 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + signerUserIdSubpacket signatureSubpacketType = 28 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + intendedRecipientSubpacket signatureSubpacketType = 35 + prefCipherSuitesSubpacket signatureSubpacketType = 39 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 9580, section 5.2.3.7 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + if !isHashed && + packetType != issuerSubpacket && + packetType != issuerFingerprintSubpacket && + packetType != embeddedSignatureSubpacket { + return + } + switch packetType { + case creationTimeSubpacket: + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.18 + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad 
length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case exportableCertSubpacket: + if subpacket[0] == 0 { + err = errors.UnsupportedError("signature with non-exportable certification") + return + } + case trustSubpacket: + if len(subpacket) != 2 { + err = errors.StructuralError("trust subpacket with bad length") + return + } + // Trust level and amount, section 5.2.3.21 + sig.TrustLevel = TrustLevel(subpacket[0]) + sig.TrustAmount = TrustAmount(subpacket[1]) + case regularExpressionSubpacket: + if len(subpacket) == 0 { + err = errors.StructuralError("regexp subpacket with bad length") + return + } + // Trust regular expression, section 5.2.3.22 + // RFC specifies the string should be null-terminated; remove a null byte from the end + if subpacket[len(subpacket)-1] != 0x00 { + err = errors.StructuralError("expected regular expression to be null-terminated") + return + } + trustRegularExpression := string(subpacket[:len(subpacket)-1]) + sig.TrustRegularExpression = &trustRegularExpression + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.13 + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.14 + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.12 + if sig.Version > 4 && isHashed { + err = errors.StructuralError("issuer subpacket found in v6 key") + return + } + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + if sig.Version <= 4 { + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + } + case notationDataSubpacket: + // Notation data, section 5.2.3.24 + if len(subpacket) < 8 { + err = errors.StructuralError("notation data subpacket with bad length") + return + } + + nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5]) + valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7]) + if len(subpacket) != int(nameLength)+int(valueLength)+8 { + err = errors.StructuralError("notation data subpacket with bad length") + return + } + + notation := Notation{ + IsHumanReadable: (subpacket[0] & 0x80) == 0x80, + Name: string(subpacket[8:(nameLength + 8)]), + Value: subpacket[(nameLength + 8):(valueLength + nameLength + 8)], + IsCritical: isCritical, + } + + sig.Notations = append(sig.Notations, ¬ation) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.16 + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.17 + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case keyserverPrefsSubpacket: + // Keyserver preferences, section 5.2.3.25 + sig.KeyserverPrefsValid = true + if len(subpacket) == 0 { + return + } + if subpacket[0]&KeyserverPrefNoModify != 0 { + sig.KeyserverPrefNoModify = true + } + case prefKeyserverSubpacket: + // Preferred keyserver, section 5.2.3.26 + sig.PreferredKeyserver = string(subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.27 + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + 
return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.29 + sig.FlagsValid = true + if len(subpacket) == 0 { + return + } + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + if subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagForward != 0 { + sig.FlagForward = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } + case signerUserIdSubpacket: + userId := string(subpacket) + sig.SignerUserId = &userId + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.31 + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = NewReasonForRevocation(subpacket[0]) + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.32 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. + if len(subpacket) > 0 { + if subpacket[0]&0x01 != 0 { + sig.SEIPDv1 = true + } + // 0x02 and 0x04 are reserved + if subpacket[0]&0x08 != 0 { + sig.SEIPDv2 = true + } + } + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.34 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case policyUriSubpacket: + // Policy URI, section 5.2.3.28 + sig.PolicyURI = string(subpacket) + case issuerFingerprintSubpacket: + if len(subpacket) == 0 { + err = errors.StructuralError("empty issuer fingerprint subpacket") + return + } + v, l := subpacket[0], len(subpacket[1:]) + if v >= 5 && l != 32 || v < 5 && l != 20 { + return nil, errors.StructuralError("bad fingerprint length") + } + sig.IssuerFingerprint = make([]byte, l) + copy(sig.IssuerFingerprint, subpacket[1:]) + sig.IssuerKeyId = new(uint64) + if v >= 5 { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) + } else { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) + } + case intendedRecipientSubpacket: + // Intended Recipient Fingerprint, section 5.2.3.36 + if len(subpacket) < 1 { + return nil, errors.StructuralError("invalid intended recipient fingerpring length") + } + version, length := subpacket[0], len(subpacket[1:]) + if version >= 5 && length != 32 || version < 5 && length != 20 { + return nil, errors.StructuralError("invalid fingerprint length") + } + fingerprint := make([]byte, length) + copy(fingerprint, subpacket[1:]) + sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint}) + case prefCipherSuitesSubpacket: + // Preferred 
AEAD cipher suites, section 5.2.3.15 + if len(subpacket)%2 != 0 { + err = errors.StructuralError("invalid aead cipher suite length") + return + } + + sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2) + + for i := 0; i < len(subpacket)/2; i++ { + sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]} + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { + return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId +} + +func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil { + return bytes.Equal(sig.IssuerFingerprint, fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 9580, Section 4.2.1. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } +} + +// SigExpired returns whether sig is a signature that has expired or is created +// in the future. +func (sig *Signature) SigExpired(currentTime time.Time) bool { + if sig.CreationTime.Unix() > currentTime.Unix() { + return true + } + if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) + return currentTime.Unix() > expiry.Unix() +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. 
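The one-, two-, and five-octet subpacket length encoding handled by parseSignatureSubpacket and serializeSubpacketLength (RFC 9580, section 4.2.1) is easier to see in isolation; the decoder below is a standalone illustration that mirrors the parsing logic above and is not part of the vendored file.

```
package main

import "fmt"

// decodeSubpacketLength mirrors parseSignatureSubpacket: one octet below 192,
// two octets for 192..16319, otherwise a five-octet form.
func decodeSubpacketLength(b []byte) (length uint32, consumed int) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), 1
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5
	}
}

func main() {
	fmt.Println(decodeSubpacketLength([]byte{100}))             // 100 1
	fmt.Println(decodeSubpacketLength([]byte{193, 4}))          // 452 2
	fmt.Println(decodeSubpacketLength([]byte{255, 0, 1, 0, 0})) // 65536 5
}
```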
+func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + var hashId byte + var ok bool + + if sig.Version < 5 { + hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash) + } else { + hashId, ok = algorithm.HashToHashId(sig.Hash) + } + + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hashId), + }) + hashedSubpacketsLength := len(hashedSubpackets) + if sig.Version == 6 { + // v6 signatures store the length in 4 octets + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 24), + uint8(hashedSubpacketsLength >> 16), + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } else { + hashedFields.Write([]byte{ + uint8(hashedSubpacketsLength >> 8), + uint8(hashedSubpacketsLength), + }) + } + lenPrefix := hashedFields.Len() + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(lenPrefix + len(hashedSubpackets)) + if sig.Version == 5 { + // v5 case + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + // v4 and v6 case + hashedFields.Write([]byte{byte(sig.Version), 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// PrepareSign must be called to create a hash object before Sign for v6 signatures. +// The created hash object initially hashes a randomly generated salt +// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6, +// the method returns an empty hash object. +// See RFC 9580 Section 5.2.4. +func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + var err error + sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random()) + if err != nil { + return nil, err + } + } + hasher.Write(sig.salt) + } + return hasher, nil +} + +// SetSalt sets the signature salt for v6 signatures. +// Assumes salt is generated correctly and checks if length matches. +// If the signature is not v6, the method ignores the salt. +// Use PrepareSign whenever possible instead of generating and +// hashing the salt externally. +// See RFC 9580 Section 5.2.4. 
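A sketch of the signing flow these helpers document (PrepareSign to obtain a hash object, salted for v6 keys, then Sign and Serialize). Illustration only, not part of the vendored file; `priv` is assumed to be an already-decrypted *packet.PrivateKey, and the hash choice and creation time are placeholders.

```
package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so sig.Hash.Available() is true
	"io"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// signData hashes data, signs it with priv, and writes the signature packet to w.
func signData(w io.Writer, priv *packet.PrivateKey, data []byte) error {
	sig := &packet.Signature{
		Version:      priv.PublicKey.Version, // 6 selects the salted path
		SigType:      packet.SigTypeBinary,
		PubKeyAlgo:   priv.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: time.Now(),
	}
	config := &packet.Config{} // library defaults

	h, err := sig.PrepareSign(config) // hashes a fresh salt for v6 signatures
	if err != nil {
		return err
	}
	h.Write(data) // the message being signed

	if err := sig.Sign(h, priv, config); err != nil {
		return err
	}
	return sig.Serialize(w)
}
```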
+func (sig *Signature) SetSalt(salt []byte) error { + if sig.Version == 6 { + expectedSaltLength, err := SaltLengthForHash(sig.Hash) + if err != nil { + return err + } + if salt == nil || len(salt) != expectedSaltLength { + return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm") + } + sig.salt = salt + } + return nil +} + +// PrepareVerify must be called to create a hash object before verifying v6 signatures. +// The created hash object initially hashes the internally stored salt. +// If the signature is not v6, the method returns an empty hash object. +// See RFC 9580 Section 5.2.4. +func (sig *Signature) PrepareVerify() (hash.Hash, error) { + if !sig.Hash.Available() { + return nil, errors.UnsupportedError("hash function") + } + hasher := sig.Hash.New() + if sig.Version == 6 { + if sig.salt == nil { + return nil, errors.StructuralError("v6 requires a salt for the hash to be signed") + } + hasher.Write(sig.salt) + } + return hasher, nil +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + if sig.Version < 6 && config.RandomizeSignaturesViaNotation() { + sig.removeNotationsWithName(SaltNotationName) + salt, err := SignatureSaltForHash(sig.Hash, config.Random()) + if err != nil { + return err + } + notation := Notation{ + Name: SaltNotationName, + Value: salt, + IsCritical: false, + IsHumanReadable: false, + } + sig.Notations = append(sig.Notations, ¬ation) + } + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if sk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + r, s, err = ecdsa.Sign(config.Random(), sk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sk := priv.PrivateKey.(*eddsa.PrivateKey) + r, s, err := eddsa.Sign(sk, digest) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(r) + sig.EdDSASigS = encoding.NewMPI(s) + } + case PubKeyAlgoEd25519: + sk := priv.PrivateKey.(*ed25519.PrivateKey) + signature, err := ed25519.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + case PubKeyAlgoEd448: + sk := priv.PrivateKey.(*ed448.PrivateKey) + signature, err := ed448.Sign(sk, digest) + if err == nil { + sig.EdSig = signature + } + case ExperimentalPubKeyAlgoHMAC: + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil) + if err == nil { + sig.HMAC = encoding.NewShortByteString(sigdata) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := userIdSignatureHash(id, pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// SignDirectKeyBinding computes a signature from priv +// On success, the signature is stored in sig. +// Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := directKeySignatureHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
+func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(hashKey, pub, prepareHash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + prepareHash, err := sig.PrepareSign(config) + if err != nil { + return err + } + if err := keyRevocationHash(pub, prepareHash); err != nil { + return err + } + return sig.Sign(prepareHash, priv, config) +} + +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil && sig.HMAC == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + case PubKeyAlgoEd25519: + sigLength = ed25519.SignatureSize + case PubKeyAlgoEd448: + sigLength = ed448.SignatureSize + case ExperimentalPubKeyAlgoHMAC: + sigLength = int(sig.HMAC.EncodedLength()) + default: + panic("impossible") + } + + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */ + 2 /* length of hashed subpackets */ + hashedSubpacketsLen + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 6 { + length += 4 + /* the two length fields are four-octet instead of two */ + 1 + /* salt length */ + len(sig.salt) /* length salt */ + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + var fields []byte + if sig.Version == 6 { + // v6 signatures use 4 octets for length + hashedSubpacketsLen := + uint32(uint32(sig.HashSuffix[4])<<24) | + uint32(uint32(sig.HashSuffix[5])<<16) | + uint32(uint32(sig.HashSuffix[6])<<8) | + uint32(sig.HashSuffix[7]) + fields = sig.HashSuffix[:8+hashedSubpacketsLen] + } else { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | + uint16(sig.HashSuffix[5]) + fields = sig.HashSuffix[:6+hashedSubpacketsLen] + + } + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + var unhashedSubpackets []byte + if sig.Version == 6 { + unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16) + unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[3] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false) + } else { + unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + } + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + if sig.Version == 6 { + // write salt for v6 signatures + _, err = w.Write([]byte{uint8(len(sig.salt))}) + if err != nil { + return + } + _, err = w.Write(sig.salt) + if err != nil { + return + } + } + + switch 
sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + case PubKeyAlgoEd25519: + err = ed25519.WriteSignature(w, sig.EdSig) + case PubKeyAlgoEd448: + err = ed448.WriteSignature(w, sig.EdSig) + case ExperimentalPubKeyAlgoHMAC: + _, err = w.Write(sig.HMAC.EncodedBytes()) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + // Signature Creation Time + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, true, creationTime}) + // Signature Expiration Time + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + // Trust Signature + if sig.TrustLevel != 0 { + subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) + } + // Regular Expression + if sig.TrustRegularExpression != nil { + // RFC specifies the string should be null-terminated; add a null byte to the end + subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) + } + // Key Expiration Time + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + // Preferred Symmetric Ciphers for v1 SEIPD + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + // Issuer Key ID + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + // Notation Data + for _, notation := range sig.Notations { + subpackets = append( + subpackets, + outputSubpacket{ + true, + notationDataSubpacket, + notation.IsCritical, + notation.getData(), + }) + } + // Preferred Hash Algorithms + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + // Preferred Compression Algorithms + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + // Keyserver Preferences + // Keyserver preferences may 
only appear in self-signatures or certification signatures. + if sig.KeyserverPrefsValid { + var prefs byte + if sig.KeyserverPrefNoModify { + prefs |= KeyserverPrefNoModify + } + subpackets = append(subpackets, outputSubpacket{true, keyserverPrefsSubpacket, false, []byte{prefs}}) + } + // Preferred Keyserver + if len(sig.PreferredKeyserver) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefKeyserverSubpacket, false, []uint8(sig.PreferredKeyserver)}) + } + // Primary User ID + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + // Policy URI + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + // Key Flags + // Key flags may only appear in self-signatures or certification signatures. + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagForward { + flags |= KeyFlagForward + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, true, []byte{flags}}) + } + // Signer's User ID + if sig.SignerUserId != nil { + subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) + } + // Reason for Revocation + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.31. + if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) + } + // Features + var features = byte(0x00) + if sig.SEIPDv1 { + features |= 0x01 + } + if sig.SEIPDv2 { + features |= 0x08 + } + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + // Embedded Signature + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.34. + if sig.EmbeddedSignature != nil { + var buf bytes.Buffer + err = sig.EmbeddedSignature.serializeBody(&buf) + if err != nil { + return + } + subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) + } + // Issuer Fingerprint + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
+ subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents}) + } + // Intended Recipient Fingerprint + for _, recipient := range sig.IntendedRecipients { + subpackets = append( + subpackets, + outputSubpacket{ + true, + intendedRecipientSubpacket, + false, + recipient.Serialize(), + }) + } + // Preferred AEAD Ciphersuites + if len(sig.PreferredCipherSuites) > 0 { + serialized := make([]byte, len(sig.PreferredCipherSuites)*2) + for i, cipherSuite := range sig.PreferredCipherSuites { + serialized[2*i] = cipherSuite[0] + serialized[2*i+1] = cipherSuite[1] + } + subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) + } + return +} + +// AddMetadataToHashSuffix modifies the current hash suffix to include metadata +// (format, filename, and time). Version 5 keys protect this data including it +// in the hash computation. See section 5.2.4. +func (sig *Signature) AddMetadataToHashSuffix() { + if sig == nil || sig.Version != 5 { + return + } + if sig.SigType != 0x00 && sig.SigType != 0x01 { + return + } + lit := sig.Metadata + if lit == nil { + // This will translate into six 0x00 bytes. + lit = &LiteralData{} + } + + // Extract the current byte count + n := sig.HashSuffix[len(sig.HashSuffix)-8:] + l := uint64( + uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | + uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) + + suffix := bytes.NewBuffer(nil) + suffix.Write(sig.HashSuffix[:l]) + + // Add the metadata + var buf [4]byte + buf[0] = lit.Format + fileName := lit.FileName + if len(lit.FileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + suffix.Write(buf[:2]) + suffix.Write([]byte(lit.FileName)) + binary.BigEndian.PutUint32(buf[:], lit.Time) + suffix.Write(buf[:]) + + suffix.Write([]byte{0x05, 0xff}) + suffix.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + sig.HashSuffix = suffix.Bytes() +} + +// SaltLengthForHash selects the required salt length for the given hash algorithm, +// as per Table 23 (Hash algorithm registry) of the crypto refresh. +// See RFC 9580 Section 9.5. +func SaltLengthForHash(hash crypto.Hash) (int, error) { + switch hash { + case crypto.SHA256, crypto.SHA224, crypto.SHA3_256: + return 16, nil + case crypto.SHA384: + return 24, nil + case crypto.SHA512, crypto.SHA3_512: + return 32, nil + default: + return 0, errors.UnsupportedError("hash function not supported for V6 signatures") + } +} + +// SignatureSaltForHash generates a random signature salt +// with the length for the given hash algorithm. +// See RFC 9580 Section 9.5. +func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) { + saltLength, err := SaltLengthForHash(hash) + if err != nil { + return nil, err + } + salt := make([]byte, saltLength) + _, err = io.ReadFull(randReader, salt) + if err != nil { + return nil, err + } + return salt, nil +} + +// removeNotationsWithName removes all notations in this signature with the given name. 
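A small sketch of the two salt helpers defined above; illustration only, not part of the vendored file.

```
package main

import (
	"crypto"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Generate a v6 signature salt for SHA-256 and confirm its length
	// matches the registry value returned by SaltLengthForHash.
	salt, err := packet.SignatureSaltForHash(crypto.SHA256, rand.Reader)
	if err != nil {
		panic(err)
	}
	want, _ := packet.SaltLengthForHash(crypto.SHA256) // 16 for SHA-256
	fmt.Println(len(salt) == want)                     // true
}
```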
+func (sig *Signature) removeNotationsWithName(name string) { + if sig == nil || sig.Notations == nil { + return + } + updatedNotations := make([]*Notation, 0, len(sig.Notations)) + for _, notation := range sig.Notations { + if notation.Name != name { + updatedNotations = append(updatedNotations, notation) + } + } + sig.Notations = updatedNotations +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 000000000..f843c35bf --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,319 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "crypto/sha256" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/hkdf" +) + +// This is the largest session key that we'll support. Since at most 256-bit cipher +// is supported in OpenPGP, this is large enough to contain also the auth tag. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + Version int + CipherFunc CipherFunction + Mode AEADMode + s2k func(out, in []byte) + iv []byte + encryptedKey []byte // Contains also the authentication tag for AEAD +} + +// parse parses an SymmetricKeyEncrypted packet as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + var buf [1]byte + + // Version + if _, err := readFull(r, buf[:]); err != nil { + return err + } + ske.Version = int(buf[0]) + if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 { + return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + } + + if V5Disabled && ske.Version == 5 { + return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed") + } + + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + + // Cipher function + if _, err := readFull(r, buf[:]); err != nil { + return err + } + ske.CipherFunc = CipherFunction(buf[0]) + if !ske.CipherFunc.IsSupported() { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0]))) + } + + if ske.Version >= 5 { + // AEAD mode + if _, err := readFull(r, buf[:]); err != nil { + return errors.StructuralError("cannot read AEAD octet from packet") + } + ske.Mode = AEADMode(buf[0]) + } + + if ske.Version > 5 { + // Scalar octet count + if _, err := readFull(r, buf[:]); err != nil { + return err + } + } + + var err error + if ske.s2k, err = s2k.Parse(r); err != nil { + if _, ok := err.(errors.ErrDummyPrivateKey); ok { + return errors.UnsupportedError("missing key GNU extension in session key") + } + return err + } + + if ske.Version >= 5 { + // AEAD IV + iv := make([]byte, ske.Mode.IvLength()) + _, err := readFull(r, iv) + if err != nil { + return errors.StructuralError("cannot read AEAD IV") + } + + ske.iv = iv + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. 
If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + switch ske.Version { + case 4: + plaintextKey, cipherFunc, err := ske.decryptV4(key) + return plaintextKey, cipherFunc, err + case 5, 6: + plaintextKey, err := ske.aeadDecrypt(ske.Version, key) + return plaintextKey, CipherFunction(0), err + } + err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + return nil, CipherFunction(0), err +} + +func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError( + "unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if len(plaintextKey) != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError( + "length of decrypted key not equal to cipher keysize") + } + return plaintextKey, cipherFunc, nil +} + +func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) { + adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)} + aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version) + + plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) + if err != nil { + return nil, err + } + return plaintextKey, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. +// The packet contains a random session key, encrypted by a key derived from +// the given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + + sessionKey := make([]byte, cipherFunc.KeySize()) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + + err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) + if err != nil { + return + } + + key = sessionKey + return +} + +// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The returned session key must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. 
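A sketch of the passphrase path these functions document: SerializeSymmetricKeyEncrypted writes the SKESK packet and returns the session key that a caller would then pass to SerializeSymmetricallyEncrypted. Illustration only, not part of the vendored file; the passphrase and buffer are placeholders.

```
package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	var buf bytes.Buffer
	config := &packet.Config{} // library defaults for cipher and S2K

	// Writes a passphrase-protected session-key packet to buf and returns
	// the random session key for use with the encrypted-data packet.
	key, err := packet.SerializeSymmetricKeyEncrypted(&buf, []byte("correct horse"), config)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d-byte SKESK packet, %d-byte session key\n", buf.Len(), len(key))
}
```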
+func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 6 + } else { + version = 4 + } + cipherFunc := config.Cipher() + // cipherFunc must be AES + if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 { + return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc))) + } + + keySize := cipherFunc.KeySize() + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. + err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K()) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5, 6: + ivLen := config.AEAD().Mode().IvLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen + } + if version > 5 { + packetLength += 2 // additional octet count fields + } + + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + // Symmetric Key Encrypted Version + buf := []byte{byte(version)} + + if version > 5 { + // Scalar octet count + buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) + } + + // Cipher function + buf = append(buf, byte(cipherFunc)) + + if version >= 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } + if version > 5 { + // Scalar octet count + buf = append(buf, byte(len(s2kBytes))) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5, 6: + mode := config.AEAD().Mode() + adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) + + // Sample iv using random reader + iv := make([]byte, config.AEAD().Mode().IvLength()) + _, err = io.ReadFull(config.Random(), iv) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. 
tag) + + encryptedData := aead.Seal(nil, iv, sessionKey, adata) + _, err = w.Write(iv) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} + +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { + var blockCipher cipher.Block + if version > 5 { + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + blockCipher = c.new(encryptionKey) + } else { + blockCipher = c.new(inputKey) + } + return mode.new(blockCipher) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 000000000..e9bbf0327 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,90 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +const aeadSaltSize = 32 + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + Version int + Contents io.Reader // contains tag for version 2 + IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9 + + // Specific to version 1 + prefix []byte + + // Specific to version 2 + Cipher CipherFunction + Mode AEADMode + ChunkSizeByte byte + Salt [aeadSaltSize]byte +} + +const ( + symmetricallyEncryptedVersionMdc = 1 + symmetricallyEncryptedVersionAead = 2 +) + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.IntegrityProtected { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case symmetricallyEncryptedVersionMdc: + se.Version = symmetricallyEncryptedVersionMdc + case symmetricallyEncryptedVersionAead: + se.Version = symmetricallyEncryptedVersionAead + if err := se.parseAead(r); err != nil { + return err + } + default: + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + if se.Version == symmetricallyEncryptedVersionAead { + return se.decryptAead(key) + } + + return se.decryptMdc(c, key) +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. 
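//
// A rough usage sketch (illustrative; w, key, and a non-nil AEAD-enabled
// config are assumed, and the CipherSuite literal mirrors the fields this
// file reads from it):
//
//	suite := CipherSuite{Cipher: config.Cipher(), Mode: config.AEAD().Mode()}
//	pt, err := SerializeSymmetricallyEncrypted(w, config.Cipher(), true, suite, key, config)
//	if err != nil {
//		return err
//	}
//	// ... write literal/compressed packets to pt, then pt.Close() to flush
//	// the final AEAD chunk.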
+func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected) + if err != nil { + return + } + + if aeadSupported { + return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key) + } + + return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go new file mode 100644 index 000000000..95341255a --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -0,0 +1,157 @@ +// Copyright 2023 Proton AG. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha256" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "golang.org/x/crypto/hkdf" +) + +// parseAead parses a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { + headerData := make([]byte, 3) + if n, err := io.ReadFull(r, headerData); n < 3 { + return errors.StructuralError("could not read aead header: " + err.Error()) + } + + // Cipher + se.Cipher = CipherFunction(headerData[0]) + // cipherFunc must have block size 16 to use AEAD + if se.Cipher.blockSize() != 16 { + return errors.UnsupportedError("invalid aead cipher: " + strconv.Itoa(int(se.Cipher))) + } + + // Mode + se.Mode = AEADMode(headerData[1]) + if se.Mode.TagLength() == 0 { + return errors.UnsupportedError("unknown aead mode: " + strconv.Itoa(int(se.Mode))) + } + + // Chunk size + se.ChunkSizeByte = headerData[2] + if se.ChunkSizeByte > 16 { + return errors.UnsupportedError("invalid aead chunk size byte: " + strconv.Itoa(int(se.ChunkSizeByte))) + } + + // Salt + if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize { + return errors.StructuralError("could not read aead salt: " + err.Error()) + } + + return nil +} + +// associatedData for chunks: tag, version, cipher, mode, chunk size byte +func (se *SymmetricallyEncrypted) associatedData() []byte { + return []byte{ + 0xD2, + symmetricallyEncryptedVersionAead, + byte(se.Cipher), + byte(se.Mode), + se.ChunkSizeByte, + } +} + +// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { + aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) + + // Carry the first tagLen bytes + tagLen := se.Mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(se.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) + } + + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(se.ChunkSizeByte), + initialNonce: nonce, + associatedData: se.associatedData(), + chunkIndex: 
make([]byte, 8), + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + reader: se.Contents, + peekedBytes: peekedBytes, + }, nil +} + +// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) { + // cipherFunc must have block size 16 to use AEAD + if cipherSuite.Cipher.blockSize() != 16 { + return nil, errors.InvalidArgumentError("invalid aead cipher function") + } + + if cipherSuite.Cipher.KeySize() != len(inputKey) { + return nil, errors.InvalidArgumentError("error in aead serialization: bad key length") + } + + // Data for en/decryption: tag, version, cipher, aead mode, chunk size + prefix := []byte{ + 0xD2, + symmetricallyEncryptedVersionAead, + byte(cipherSuite.Cipher), + byte(cipherSuite.Mode), + chunkSizeByte, + } + + // Write header (that correspond to prefix except first byte) + n, err := ciphertext.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, err + } + + // Random salt + salt := make([]byte, aeadSaltSize) + if _, err := io.ReadFull(rand, salt); err != nil { + return nil, err + } + + if _, err := ciphertext.Write(salt); err != nil { + return nil, err + } + + aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) + + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(chunkSizeByte), + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + writer: ciphertext, + }, nil +} + +func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) { + hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + // Last 64 bits of nonce are the counter + nonce = make([]byte, mode.IvLength()-8) + + _, _ = readFull(hkdfReader, nonce) + + blockCipher := c.new(encryptionKey) + aead = mode.new(blockCipher) + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go similarity index 60% rename from vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go index 1a1a62964..0a3aecadf 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -8,54 +8,38 @@ import ( "crypto/cipher" "crypto/sha1" "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" "hash" "io" "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" ) -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte +// seMdcReader wraps an io.Reader with a no-op Close method. 
+type seMdcReader struct { + in io.Reader } -const symmetricallyEncryptedVersion = 1 +func (ser seMdcReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r +func (ser seMdcReader) Close() error { return nil } -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) +func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) { + if !c.IsSupported() { + return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c))) } - if len(key) != keySize { + + if len(key) != c.KeySize() { return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") } if se.prefix == nil { se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) + _, err := readFull(se.Contents, se.prefix) if err != nil { return nil, err } @@ -64,47 +48,31 @@ func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.Read } ocfbResync := OCFBResync - if se.MDC { + if se.IntegrityProtected { // MDC packets use a different form of OCFB mode. ocfbResync = OCFBNoResync } s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - plaintext := cipher.StreamReader{S: s, R: se.contents} + plaintext := cipher.StreamReader{S: s, R: se.Contents} - if se.MDC { - // MDC packets have an embedded hash that we need to check. + if se.IntegrityProtected { + // IntegrityProtected packets have an embedded hash that we need to check. h := sha1.New() h.Write(se.prefix) return &seMDCReader{in: plaintext, h: h}, nil } // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil + return seMdcReader{plaintext}, nil } const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size // An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold // of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked +// MDC packet containing a hash of the previous Contents which is checked // against the running hash. See RFC 4880, section 5.13. type seMDCReader struct { in io.Reader @@ -175,12 +143,12 @@ func (ser *seMDCReader) Read(buf []byte) (n int, err error) { return } -// This is a new-format packet tag byte for a type 19 (MDC) packet. +// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet. 
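// (0x80 sets the always-one header bit, 0x40 selects the new packet format,
// and 19 is the MDC packet tag, so the constant works out to 0xD3.)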
const mdcPacketTagByte = byte(0x80) | 0x40 | 19 func (ser *seMDCReader) Close() error { if ser.error { - return errors.SignatureError("error during reading") + return errors.ErrMDCMissing } for !ser.eof { @@ -191,18 +159,20 @@ func (ser *seMDCReader) Close() error { break } if err != nil { - return errors.SignatureError("error during reading") + return errors.ErrMDCMissing } } - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } ser.h.Write(ser.trailer[:2]) final := ser.h.Sum(nil) if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") + return errors.ErrMDCHashMismatch + } + // The hash already includes the MDC header, but we still check its value + // to confirm encryption correctness + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.ErrMDCMissing } return nil } @@ -236,7 +206,7 @@ func (w *seMDCWriter) Close() (err error) { return w.w.Close() } -// noOpCloser is like an io.NopCloser, but for an io.Writer. +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. type noOpCloser struct { w io.Writer } @@ -249,21 +219,17 @@ func (c noOpCloser) Close() error { return nil } -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") +func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { + // Disallow old cipher suites + if !c.IsSupported() || c < CipherAES128 { + return nil, errors.InvalidArgumentError("invalid mdc cipher function") } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return + + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length") } - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc}) if err != nil { return } @@ -271,9 +237,9 @@ func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, block := c.new(key) blockSize := block.BlockSize() iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) + _, err = io.ReadFull(config.Random(), iv) if err != nil { - return + return nil, err } s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) _, err = ciphertext.Write(prefix) @@ -285,6 +251,6 @@ func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, h := sha1.New() h.Write(iv) h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} + Contents = &seMDCWriter{w: plaintext, h: h} return } diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go similarity index 89% rename from vendor/golang.org/x/crypto/openpgp/packet/userattribute.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go index ff7ef5307..63814ed13 100644 --- 
a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -41,9 +41,16 @@ func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error if err = jpeg.Encode(&buf, photo, nil); err != nil { return } + + lengthBuf := make([]byte, 5) + n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1) + lengthBuf = lengthBuf[:n] + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) + SubType: UserAttrImageSubpacket, + EncodedLength: lengthBuf, + Contents: buf.Bytes(), + }) } return } @@ -68,7 +75,10 @@ func (uat *UserAttribute) parse(r io.Reader) (err error) { func (uat *UserAttribute) Serialize(w io.Writer) (err error) { var buf bytes.Buffer for _, sp := range uat.Contents { - sp.Serialize(&buf) + err = sp.Serialize(&buf) + if err != nil { + return err + } } if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { return err diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go similarity index 96% rename from vendor/golang.org/x/crypto/openpgp/packet/userid.go rename to vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index 359a462eb..3c7451a3c 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -155,5 +155,12 @@ func parseUserId(id string) (name, comment, email string) { name = strings.TrimSpace(id[n.start:n.end]) comment = strings.TrimSpace(id[c.start:c.end]) email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go new file mode 100644 index 000000000..43def2c4b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -0,0 +1,619 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + _ "crypto/sha512" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + _ "golang.org/x/crypto/sha3" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. 
+ IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedByFingerprint []byte // the key fingerprint of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. 
+ md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448, packet.ExperimentalPubKeyAlgoAEAD: + break + default: + continue + } + if keyring != nil { + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + } + case *packet.SymmetricallyEncrypted: + if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() { + return nil, errors.UnsupportedError("message is not integrity protected") + } + edp = p + break ParsePackets + case *packet.AEADEncrypted: + edp = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + if errDec != nil { + continue + } + } + // Try to decrypt symmetrically encrypted + decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + // In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc: + // only for < 5% of cases we will proceed to decrypt the data + if err == nil { + decrypted, err = edp.Decrypt(cipherFunc, key) + if err != nil { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) + if sensitiveParsingErr != nil { + return nil, errors.StructuralError("parsing error") + } + return mdFinal, nil +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
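//
// Caller-side sketch for the exported entry point (illustrative; ciphertext,
// keyring, and promptFn are assumed to exist in the calling code):
//
//	md, err := ReadMessage(ciphertext, keyring, promptFn, nil)
//	if err != nil {
//		return err
//	}
//	plaintext, err := io.ReadAll(md.UnverifiedBody) // must reach EOF before trusting
//	if err != nil {
//		return err
//	}
//	if md.SignatureError != nil {
//		return md.SignatureError // only meaningful after UnverifiedBody hits EOF
//	}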
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + if p.Version == 6 { + md.SignedByFingerprint = p.KeyFingerprint + } + md.SignedByKeyId = p.KeyId + + if keyring != nil { + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = &checkReader{md, false} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { + switch sigType { + case packet.SigTypeBinary: + return hashFunc, nil + case packet.SigTypeText: + return NewCanonicalTextHash(hashFunc), nil + } + return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { + if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { + return nil, nil, errors.UnsupportedError("unsupported hash function") + } + if !hashFunc.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) + } + h := hashFunc.New() + if sigSalt != nil { + h.Write(sigSalt) + } + wrappedHash, err := wrapHashForSignature(h, sigType) + if err != nil { + return nil, nil, err + } + switch sigType { + case packet.SigTypeBinary: + return h, wrappedHash, nil + case packet.SigTypeText: + return h, wrappedHash, nil + } + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. 
+type checkReader struct { + md *MessageDetails + checked bool +} + +func (cr *checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + if cr.checked { + // Only check once + return n, io.EOF + } + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + cr.checked = true + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. +type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + signatureError = checkMessageSignatureDetails(key, sig, scr.config) + } + scr.md.Signature = sig + scr.md.SignatureError = signatureError + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// VerifyDetachedSignature takes a signed file and a detached signature and +// returns the signature packet and the entity the signature was signed by, +// if any, and a possible signature verification error. +// If the signer isn't known, ErrUnknownIssuer is returned. +func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, nil, false, config) +} + +// VerifyDetachedSignatureAndHash performs the same actions as +// VerifyDetachedSignature and checks that the expected hash functions were used. 
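//
// A minimal detached-verification sketch (illustrative; keyring, signedFile,
// and sigFile are assumed, and SHA-256 is just an example expected hash):
//
//	sig, signer, err := VerifyDetachedSignatureAndHash(
//		keyring, signedFile, sigFile, []crypto.Hash{crypto.SHA256}, nil)
//	if err != nil {
//		return err // e.g. errors.ErrUnknownIssuer when no key matches
//	}
//	_ = sig    // the verified *packet.Signature
//	_ = signer // the *Entity it was verified against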
+func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the entity the signature was signed by, if any, and a possible +// signature verification error. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) + return +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. +func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) + return +} + +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, nil, err + } + + var ok bool + sig, ok = p.(*packet.Signature) + if !ok { + return nil, nil, errors.StructuralError("non signature packet found") + } + if sig.IssuerKeyId == nil { + return nil, nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + if checkHashes { + matchFound := false + // check for hashes + for _, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + matchFound = true + break + } + } + if !matchFound { + return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers") + } + } + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, err := sig.PrepareVerify() + if err != nil { + return nil, nil, err + } + wrappedHash, err := wrapHashForSignature(h, sigType) + if err != nil { + return nil, nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, nil, err + } + + for _, key := range keys { + err = key.PublicKey.VerifySignature(h, sig) + if err == nil { + return sig, key.Entity, checkMessageSignatureDetails(&key, sig, config) + } + } + + return nil, nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. 
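//
// Typical use with an ASCII-armored signature (illustrative; keyring,
// signedFile, and armoredSig are assumed):
//
//	signer, err := CheckArmoredDetachedSignature(keyring, signedFile, armoredSig, nil)
//	if err != nil {
//		return err
//	}
//	_ = signer // the signing *Entity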
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body, config) +} + +// checkMessageSignatureDetails returns an error if: +// - The signature (or one of the binding signatures mentioned below) +// has a unknown critical notation data subpacket +// - The primary key of the signing entity is revoked +// - The primary identity is revoked +// - The signature is expired +// - The primary key of the signing entity is expired according to the +// primary identity binding signature +// +// ... or, if the signature was signed by a subkey and: +// - The signing subkey is revoked +// - The signing subkey is expired according to the subkey binding signature +// - The signing subkey binding signature is expired +// - The signing subkey cross-signature is expired +// +// NOTE: The order of these checks is important, as the caller may choose to +// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never +// ignore any other errors. +func checkMessageSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { + now := config.Now() + primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() + signedBySubKey := key.PublicKey != key.Entity.PrimaryKey + sigsToCheck := []*packet.Signature{signature, primarySelfSignature} + if signedBySubKey { + sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) + } + for _, sig := range sigsToCheck { + for _, notation := range sig.Notations { + if notation.IsCritical && !config.KnownNotation(notation.Name) { + return errors.SignatureError("unknown critical notation: " + notation.Name) + } + } + } + if key.Entity.Revoked(now) || // primary key is revoked + (signedBySubKey && key.Revoked(now)) || // subkey is revoked + (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 + return errors.ErrKeyRevoked + } + if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired + return errors.ErrKeyExpired + } + if signedBySubKey { + if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired + return errors.ErrKeyExpired + } + } + for _, sig := range sigsToCheck { + if sig.SigExpired(now) { // any of the relevant signatures are expired + return errors.ErrSignatureExpired + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 000000000..77282c0ea --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,475 @@ +package openpgp + +const testKey1KeyId uint64 = 0xA34D7E18C20C31BB +const testKey3KeyId uint64 = 0x338934250CCC0360 +const testKeyP256KeyId uint64 = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = 
"848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = 
"c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const 
dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = 
"94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn +vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD 
+24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx +YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + 
+const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + +owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` + +// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ +const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh Section A.3. +const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB +exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ +BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh +RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe +7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/ +LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG +GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6 +2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE +M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr +k0mXubZvyl4GBg== +-----END PGP PRIVATE KEY BLOCK-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304 +const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See OpenPGP crypto refresh merge request: +// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305 +const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE----- + +wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO +WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS +aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l +yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo +bhF30A+IitsxxA== +-----END PGP MESSAGE-----` + +// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274 +// decryption password: "correct horse battery staple" +const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC +FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS +3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC +Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW 
+cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin +7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/ +0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0 +gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf +9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR +v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr +DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki +Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt +ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG +-----END PGP PRIVATE KEY BLOCK-----` + +const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+ +qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F +gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi +jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb +lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q +zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY +0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7 +gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL +sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ +aU71tdtNBQ== +=e7jT +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN +aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8 +nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl ++bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J +BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK +chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG +ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw +FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln +cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+ +s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh +6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ +sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz +dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt +YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ +1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i +aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr +fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF +gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q +Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj +jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF +qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH +dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI +zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/ 
+0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8 +9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x +Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH +UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7 +/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ +nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl +owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM +GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu +a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0 +M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD +lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5 +01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb +hW1Hj9AO8lzggBQ= +=Nt+N +-----END PGP PUBLIC KEY BLOCK----- +` + +const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE----- + +wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8 +N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3 +AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI +NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv +KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y +EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN +AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb +UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB +ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+ +7A5CBid5 +=aQkm +-----END PGP SIGNATURE----- +` + +const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE----- + +owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x +U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM +f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10 +2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n +vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA= +=fRXs +-----END PGP MESSAGE-----` + +const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+ +fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5 +GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0 +JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS +YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6 +AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki +Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf +9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa +JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag +Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr +woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb +LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA +SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP +GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2 +bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X +W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD +AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY +hz3tYjKhoFTKEIq3y3Pp +=h/aX +-----END PGP PUBLIC KEY BLOCK-----` + +const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: Bob's OpenPGP Transferable Secret Key + +lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz 
+/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM +cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK +3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z +Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs +hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ +bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4 +i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI +1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP +fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6 +fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E +LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx ++akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL +hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN +WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/ +MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC +mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC +YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E +he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8 +zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P +NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT +t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w +ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC +F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U +2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX +yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe +doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3 +BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl +sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN +4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+ +L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG +ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad +BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD +bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar +29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2 +WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB +leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te +g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj +Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn +JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx +IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp +SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h +OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np +Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c ++EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0 +tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o +BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny +zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK 
+clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl +zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr +gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ +aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5 +fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/ +ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5 +HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf +SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd +5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ +E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM +GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY +vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ +26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP +eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX +c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief +rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0 +JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg +71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH +s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd +NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91 +6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7 +xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE= +=miES +-----END PGP PRIVATE KEY BLOCK----- +` + +const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK----- +` + +const msgv5Test = `-----BEGIN PGP MESSAGE----- + +wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+ +rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6 +cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9 +HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB +Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2 +6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE +v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx +5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn +CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx +lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb +FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT +EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h +pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc +byVJHvLO/XErtC+GNIJeMg== +=liRq +-----END PGP MESSAGE----- +` + +// A key that contains a persistent AEAD subkey +const keyWithAEADSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xVgEYs/4KxYJKwYBBAHaRw8BAQdA7tIsntXluwloh/H62PJMqasjP00M86fv +/Pof9A968q8AAQDYcgkPKUdWAxsDjDHJfouPS4q5Me3ks+umlo5RJdwLZw4k +zQ1TeW1tZXRyaWMgS2V5wowEEBYKAB0FAmLP+CsECwkHCAMVCAoEFgACAQIZ 
+AQIbAwIeAQAhCRDkNhFDvaU8vxYhBDJNoyEFquVOCf99d+Q2EUO9pTy/5XQA +/1F2YPouv0ydBDJU3EOS/4bmPt7yqvzciWzeKVEOkzYuAP9OsP7q/5ccqOPX +mmRUKwd82/cNjdzdnWZ8Tq89XMwMAMdqBGLP+CtkCfFyZxOMF0BWLwAE8pLy +RVj2n2K7k6VvrhyuTqDkFDUFALiSLrEfnmTKlsPYS3/YzsODF354ccR63q73 +3lmCrvFRyaf6AHvVrBYPbJR+VhuTjZTwZKvPPKv0zVdSqi5JDEQiocJ4BBgW +CAAJBQJiz/grAhsMACEJEOQ2EUO9pTy/FiEEMk2jIQWq5U4J/3135DYRQ72l +PL+fEQEA7RaRbfa+AtiRN7a4GuqVEDZi3qtQZ2/Qcb27/LkAD0sA/3r9drYv +jyu46h1fdHHyo0HS2MiShZDZ8u60JnDltloD +=8TxH +-----END PGP PRIVATE KEY BLOCK----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go new file mode 100644 index 000000000..925115807 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -0,0 +1,414 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2k implements the various OpenPGP string-to-key transforms as +// specified in RFC 4880 section 3.7.1, and Argon2 specified in +// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4. +package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k" + +import ( +	"crypto" +	"hash" +	"io" +	"strconv" + +	"github.com/ProtonMail/go-crypto/openpgp/errors" +	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +	"golang.org/x/crypto/argon2" +) + +type Mode uint8 + +// Defines the S2K mode constants +// +// 0 (simple), 1(salted), 3(iterated), 4(argon2) +const ( +	SimpleS2K Mode = 0 +	SaltedS2K Mode = 1 +	IteratedSaltedS2K Mode = 3 +	Argon2S2K Mode = 4 +	GnuS2K Mode = 101 +) + +const Argon2SaltSize int = 16 + +// Params contains all the parameters of the s2k packet +type Params struct { +	// mode is the mode of s2k function. +	// It can be 0 (simple), 1(salted), 3(iterated) +	// 2(reserved) 100-110(private/experimental). +	mode Mode +	// hashId is the ID of the hash function used in any of the modes +	hashId byte +	// salt is a byte array to use as a salt in hashing process or argon2 +	saltBytes [Argon2SaltSize]byte +	// countByte is used to determine how many rounds of hashing are to +	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3. +	countByte byte +	// passes is a parameter in Argon2 to determine the number of iterations +	// See RFC the crypto refresh Section 3.7.1.4. +	passes byte +	// parallelism is a parameter in Argon2 to determine the degree of parallelism +	// See RFC the crypto refresh Section 3.7.1.4. +	parallelism byte +	// memoryExp is a parameter in Argon2 to determine the memory usage +	// i.e., 2 ** memoryExp kibibytes +	// See RFC the crypto refresh Section 3.7.1.4. +	memoryExp byte +} + +// encodeCount converts an iterative "count" in the range 65536 to +// 65011712, inclusive, to an encoded count. The return value is the +// octet that is actually stored in the GPG file. encodeCount panics +// if i is not in the above range (Config.EncodedCount takes care to +// pass i in the correct range). See RFC 4880 Section 3.7.1.3. +func encodeCount(i int) uint8 { +	if i < 65536 || i > 65011712 { +		panic("count arg i outside the required range") +	} + +	for encoded := 96; encoded < 256; encoded++ { +		count := decodeCount(uint8(encoded)) +		if count >= i { +			return uint8(encoded) +		} +	} + +	return 255 +} + +// decodeCount returns the s2k mode 3 iterative "count" corresponding to +// the encoded octet c.
+func decodeCount(c uint8) int { + return (16 + int(c&15)) << (uint32(c>>4) + 6) +} + +// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to +// 2**31, inclusive, to an encoded memory. The return value is the +// octet that is actually stored in the GPG file. encodeMemory panics +// if is not in the above range +// See OpenPGP crypto refresh Section 3.7.1.4. +func encodeMemory(memory uint32, parallelism uint8) uint8 { + if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) { + panic("Memory argument memory is outside the required range") + } + + for exp := 3; exp < 31; exp++ { + compare := decodeMemory(uint8(exp)) + if compare >= memory { + return uint8(exp) + } + } + + return 31 +} + +// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent +func decodeMemory(memoryExponent uint8) uint32 { + return uint32(1) << memoryExponent +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Argon2 writes to out the key derived from the password (in) with the Argon2 +// function (the crypto refresh, section 3.7.1.4) +func Argon2(out []byte, in []byte, salt []byte, passes uint8, paralellism uint8, memoryExp uint8) { + key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), paralellism, uint32(len(out))) + copy(out[:], key) +} + +// Generate generates valid parameters from given configuration. +// It will enforce the Iterated and Salted or Argon2 S2K method. 
+func Generate(rand io.Reader, c *Config) (*Params, error) { + var params *Params + if c != nil && c.Mode() == Argon2S2K { + // handle Argon2 case + argonConfig := c.Argon2() + params = &Params{ + mode: Argon2S2K, + passes: argonConfig.Passes(), + parallelism: argonConfig.Parallelism(), + memoryExp: argonConfig.EncodedMemory(), + } + } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + + params = &Params{ + mode: SaltedS2K, + hashId: hashId, + } + } else { // Enforce IteratedSaltedS2K method otherwise + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + if c != nil { + c.S2KMode = IteratedSaltedS2K + } + params = &Params{ + mode: IteratedSaltedS2K, + hashId: hashId, + countByte: c.EncodedCount(), + } + } + if _, err := io.ReadFull(rand, params.salt()); err != nil { + return nil, err + } + return params, nil +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. If the S2K is a special +// GNU extension that indicates that the private key is missing, then the error +// returned is errors.ErrDummyPrivateKey. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + params, err := ParseIntoParams(r) + if err != nil { + return nil, err + } + + return params.Function() +} + +// ParseIntoParams reads a binary specification for a string-to-key +// transformation from r and returns a struct describing the s2k parameters. +func ParseIntoParams(r io.Reader) (params *Params, err error) { + var buf [Argon2SaltSize + 3]byte + + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + + params = &Params{ + mode: Mode(buf[0]), + } + + switch params.mode { + case SimpleS2K: + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + return params, nil + case SaltedS2K: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + return params, nil + case IteratedSaltedS2K: + _, err = io.ReadFull(r, buf[:10]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + params.countByte = buf[9] + return params, nil + case Argon2S2K: + _, err = io.ReadFull(r, buf[:Argon2SaltSize+3]) + if err != nil { + return nil, err + } + copy(params.salt(), buf[:Argon2SaltSize]) + params.passes = buf[Argon2SaltSize] + params.parallelism = buf[Argon2SaltSize+1] + params.memoryExp = buf[Argon2SaltSize+2] + return params, nil + case GnuS2K: + // This is a GNU extension. 
See + // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 + if _, err = io.ReadFull(r, buf[:5]); err != nil { + return nil, err + } + params.hashId = buf[0] + if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 { + return params, nil + } + return nil, errors.UnsupportedError("GNU S2K extension") + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Mode() Mode { + return params.mode +} + +func (params *Params) Dummy() bool { + return params != nil && params.mode == GnuS2K +} + +func (params *Params) salt() []byte { + switch params.mode { + case SaltedS2K, IteratedSaltedS2K: + return params.saltBytes[:8] + case Argon2S2K: + return params.saltBytes[:Argon2SaltSize] + default: + return nil + } +} + +func (params *Params) Function() (f func(out, in []byte), err error) { + if params.Dummy() { + return nil, errors.ErrDummyPrivateKey("dummy key found") + } + var hashObj crypto.Hash + if params.mode != Argon2S2K { + var ok bool + hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) + } + if !hashObj.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + } + } + + switch params.mode { + case SimpleS2K: + f := func(out, in []byte) { + Simple(out, hashObj.New(), in) + } + + return f, nil + case SaltedS2K: + f := func(out, in []byte) { + Salted(out, hashObj.New(), in, params.salt()) + } + + return f, nil + case IteratedSaltedS2K: + f := func(out, in []byte) { + Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte)) + } + + return f, nil + case Argon2S2K: + f := func(out, in []byte) { + Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Serialize(w io.Writer) (err error) { + if _, err = w.Write([]byte{uint8(params.mode)}); err != nil { + return + } + if params.mode != Argon2S2K { + if _, err = w.Write([]byte{params.hashId}); err != nil { + return + } + } + if params.Dummy() { + _, err = w.Write(append([]byte("GNU"), 1)) + return + } + if params.mode > 0 { + if _, err = w.Write(params.salt()); err != nil { + return + } + if params.mode == IteratedSaltedS2K { + _, err = w.Write([]byte{params.countByte}) + } + if params.mode == Argon2S2K { + _, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp}) + } + } + return +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. 
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { +	params, err := Generate(rand, c) +	if err != nil { +		return err +	} +	err = params.Serialize(w) +	if err != nil { +		return err +	} + +	f, err := params.Function() +	if err != nil { +		return err +	} +	f(key, passphrase) +	return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go new file mode 100644 index 000000000..616e0d12c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go @@ -0,0 +1,26 @@ +package s2k + +// Cache stores keys derived with s2k functions from one passphrase +// to avoid recomputation if multiple items are encrypted with +// the same parameters. +type Cache map[Params][]byte + +// GetOrComputeDerivedKey tries to retrieve the key +// for the given s2k parameters from the cache. +// If there is no hit, it derives the key with the s2k function from the passphrase, +// updates the cache, and returns the key. +func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) { +	key, found := (*c)[*params] +	if !found || len(key) != expectedKeySize { +		var err error +		derivedKey := make([]byte, expectedKeySize) +		s2k, err := params.Function() +		if err != nil { +			return nil, err +		} +		s2k(derivedKey, passphrase) +		(*c)[*params] = derivedKey +		return derivedKey, nil +	} +	return key, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go new file mode 100644 index 000000000..b93db1ab8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go @@ -0,0 +1,129 @@ +package s2k + +import "crypto" + +// Config collects configuration parameters for s2k key-stretching +// transformations. A nil *Config is valid and results in all default +// values. +type Config struct { +	// S2K (String to Key) mode, used for key derivation in the context of secret key encryption +	// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used. +	// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true, +	// s2k.SaltedS2K can also be used. +	// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it +	//(pending standardisation). +	// 0 (simple), 1(salted), 3(iterated), 4(argon2) +	// 2(reserved) 100-110(private/experimental). +	S2KMode Mode +	// Only relevant if S2KMode is not set to s2k.Argon2S2K. +	// Hash is the default hash function to be used. If +	// nil, SHA256 is used. +	Hash crypto.Hash +	// Argon2 parameters for S2K (String to Key). +	// Only relevant if S2KMode is set to s2k.Argon2S2K. +	// If nil, default parameters are used. +	// For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4. +	Argon2Config *Argon2Config +	// Only relevant if S2KMode is set to s2k.IteratedSaltedS2K. +	// Iteration count for Iterated S2K (String to Key). It +	// determines the strength of the passphrase stretching when +	// the said passphrase is hashed to produce a key. S2KCount +	// should be between 65536 and 65011712, inclusive. If Config +	// is nil or S2KCount is 0, the value 16777216 is used. Not all +	// values in the above range can be represented. S2KCount will +	// be rounded up to the next representable value if it cannot +	// be encoded exactly.
When set, it is strongly encouraged to +	// use a value that is at least 65536. See RFC 4880 Section +	// 3.7.1.3. +	S2KCount int +	// Indicates whether the passphrase passed by the application is a +	// high-entropy key (e.g. it's randomly generated or derived from +	// another passphrase using a strong key derivation function). +	// When true, allows the S2KMode to be s2k.SaltedS2K. +	// When the passphrase is not a high-entropy key, using SaltedS2K is +	// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08. +	PassphraseIsHighEntropy bool +} + +// Argon2Config stores the Argon2 parameters. +// A nil *Argon2Config is valid and results in all default values. +type Argon2Config struct { +	NumberOfPasses uint8 +	DegreeOfParallelism uint8 +	// Memory specifies the desired Argon2 memory usage in kibibytes. +	// For example memory=64*1024 sets the memory cost to ~64 MB. +	Memory uint32 +} + +func (c *Config) Mode() Mode { +	if c == nil { +		return IteratedSaltedS2K +	} +	return c.S2KMode +} + +func (c *Config) hash() crypto.Hash { +	if c == nil || uint(c.Hash) == 0 { +		return crypto.SHA256 +	} + +	return c.Hash +} + +func (c *Config) Argon2() *Argon2Config { +	if c == nil || c.Argon2Config == nil { +		return nil +	} +	return c.Argon2Config +} + +// EncodedCount gets the encoded count +func (c *Config) EncodedCount() uint8 { +	if c == nil || c.S2KCount == 0 { +		return 224 // The common case. Corresponding to 16777216 +	} + +	i := c.S2KCount + +	switch { +	case i < 65536: +		i = 65536 +	case i > 65011712: +		i = 65011712 +	} + +	return encodeCount(i) +} + +func (c *Argon2Config) Passes() uint8 { +	if c == nil || c.NumberOfPasses == 0 { +		return 3 +	} +	return c.NumberOfPasses +} + +func (c *Argon2Config) Parallelism() uint8 { +	if c == nil || c.DegreeOfParallelism == 0 { +		return 4 +	} +	return c.DegreeOfParallelism +} + +func (c *Argon2Config) EncodedMemory() uint8 { +	if c == nil || c.Memory == 0 { +		return 16 // 64 MiB of RAM +	} + +	memory := c.Memory +	lowerBound := uint32(c.Parallelism()) * 8 +	upperBound := uint32(2147483648) + +	switch { +	case memory < lowerBound: +		memory = lowerBound +	case memory > upperBound: +		memory = upperBound +	} + +	return encodeMemory(memory, c.Parallelism()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go new file mode 100644 index 000000000..044b13943 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go @@ -0,0 +1,76 @@ +package symmetric + +import ( +	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +	"io" +) + +type AEADPublicKey struct { +	Cipher algorithm.CipherFunction +	BindingHash [32]byte +	Key []byte +} + +type AEADPrivateKey struct { +	PublicKey AEADPublicKey +	HashSeed [32]byte +	Key []byte +} + +func AEADGenerateKey(rand io.Reader, cipher algorithm.CipherFunction) (priv *AEADPrivateKey, err error) { +	priv, err = generatePrivatePartAEAD(rand, cipher) +	if err != nil { +		return +	} + +	priv.generatePublicPartAEAD(cipher) +	return +} + +func generatePrivatePartAEAD(rand io.Reader, cipher algorithm.CipherFunction) (priv *AEADPrivateKey, err error) { +	priv = new(AEADPrivateKey) +	var seed [32] byte +	_, err = rand.Read(seed[:]) +	if err != nil { +		return +	} + +	key := make([]byte, cipher.KeySize()) +	_, err = rand.Read(key) +	if err != nil { +		return +	} + +	priv.HashSeed = seed +	priv.Key = key +	return +} + +func (priv *AEADPrivateKey) generatePublicPartAEAD(cipher algorithm.CipherFunction) (err error) { +
priv.PublicKey.Cipher = cipher + + bindingHash := ComputeBindingHash(priv.HashSeed) + + priv.PublicKey.Key = make([]byte, len(priv.Key)) + copy(priv.PublicKey.Key, priv.Key) + copy(priv.PublicKey.BindingHash[:], bindingHash) + return +} + +func (pub *AEADPublicKey) Encrypt(rand io.Reader, data []byte, mode algorithm.AEADMode) (nonce []byte, ciphertext []byte, err error) { + block := pub.Cipher.New(pub.Key) + aead := mode.New(block) + nonce = make([]byte, aead.NonceSize()) + rand.Read(nonce) + ciphertext = aead.Seal(nil, nonce, data, nil) + return +} + +func (priv *AEADPrivateKey) Decrypt(nonce []byte, ciphertext []byte, mode algorithm.AEADMode) (message []byte, err error) { + + block := priv.PublicKey.Cipher.New(priv.Key) + aead := mode.New(block) + message, err = aead.Open(nil, nonce, ciphertext, nil) + return +} + diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go new file mode 100644 index 000000000..fd4a7cbb8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go @@ -0,0 +1,109 @@ +package symmetric + +import ( + "crypto" + "crypto/hmac" + "crypto/sha256" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +type HMACPublicKey struct { + Hash algorithm.Hash + BindingHash [32]byte + // While this is a "public" key, the symmetric key needs to be present here. + // Symmetric cryptographic operations use the same key material for + // signing and verifying, and go-crypto assumes that a public key type will + // be used for verification. Thus, this `Key` field must never be exported + // publicly. + Key []byte +} + +type HMACPrivateKey struct { + PublicKey HMACPublicKey + HashSeed [32]byte + Key []byte +} + +func HMACGenerateKey(rand io.Reader, hash algorithm.Hash) (priv *HMACPrivateKey, err error) { + priv, err = generatePrivatePartHMAC(rand, hash) + if err != nil { + return + } + + priv.generatePublicPartHMAC(hash) + return +} + +func generatePrivatePartHMAC(rand io.Reader, hash algorithm.Hash) (priv *HMACPrivateKey, err error) { + priv = new(HMACPrivateKey) + var seed [32] byte + _, err = rand.Read(seed[:]) + if err != nil { + return + } + + key := make([]byte, hash.Size()) + _, err = rand.Read(key) + if err != nil { + return + } + + priv.HashSeed = seed + priv.Key = key + return +} + +func (priv *HMACPrivateKey) generatePublicPartHMAC(hash algorithm.Hash) (err error) { + priv.PublicKey.Hash = hash + + bindingHash := ComputeBindingHash(priv.HashSeed) + copy(priv.PublicKey.BindingHash[:], bindingHash) + + priv.PublicKey.Key = make([]byte, len(priv.Key)) + copy(priv.PublicKey.Key, priv.Key) + return +} + +func ComputeBindingHash(seed [32]byte) []byte { + bindingHash := sha256.New() + bindingHash.Write(seed[:]) + + return bindingHash.Sum(nil) +} + +func (priv *HMACPrivateKey) Public() crypto.PublicKey { + return &priv.PublicKey +} + +func (priv *HMACPrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + expectedMAC, err := calculateMAC(priv.PublicKey.Hash, priv.Key, digest) + if err != nil { + return + } + signature = make([]byte, len(expectedMAC)) + copy(signature, expectedMAC) + return +} + +func (pub *HMACPublicKey) Verify(digest []byte, signature []byte) (bool, error) { + expectedMAC, err := calculateMAC(pub.Hash, pub.Key, digest) + if err != nil { + return false, err + } + return hmac.Equal(expectedMAC, signature), nil +} + +func 
calculateMAC(hash algorithm.Hash, key []byte, data []byte) ([]byte, error) { + hashFunc := hash.HashFunc() + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + + mac := hmac.New(hashFunc.New, key) + mac.Write(data) + + return mac.Sum(nil), nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go new file mode 100644 index 000000000..b0f6ef7b0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -0,0 +1,620 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. 
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + if _, ok := algorithm.HashToHashId(config.Hash()); !ok { + return errors.InvalidArgumentError("invalid hash function") + } + + sig := createSignaturePacket(signingKey.PublicKey, sigType, config) + + h, err := sig.PrepareSign(config) + if err != nil { + return + } + wrappedHash, err := wrapHashForSignature(h, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. 
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { +	if hints == nil { +		hints = &FileHints{} +	} + +	key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) +	if err != nil { +		return +	} + +	var w io.WriteCloser +	cipherSuite := packet.CipherSuite{ +		Cipher: config.Cipher(), +		Mode: config.AEAD().Mode(), +	} +	w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config) +	if err != nil { +		return +	} + +	literalData := w +	if algo := config.Compression(); algo != packet.CompressionNone { +		var compConfig *packet.CompressionConfig +		if config != nil { +			compConfig = config.CompressionConfig +		} +		literalData, err = packet.SerializeCompressed(w, algo, compConfig) +		if err != nil { +			return +		} +	} + +	var epochSeconds uint32 +	if !hints.ModTime.IsZero() { +		epochSeconds = uint32(hints.ModTime.Unix()) +	} +	return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { +	var j int +	for _, v := range a { +		for _, v2 := range b { +			if v == v2 { +				a[j] = v +				j++ +				break +			} +		} +	} + +	return a[:j] +} + +// intersectCipherSuites mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) { +	var j int +	for _, v := range a { +		for _, v2 := range b { +			if v[0] == v2[0] && v[1] == v2[1] { +				a[j] = v +				j++ +				break +			} +		} +	} + +	return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { +	v, ok := algorithm.HashToHashId(h) +	if !ok { +		panic("tried to convert unknown hash") +	} +	return v +} + +// EncryptText encrypts a message to a number of recipients and, optionally, +// signs it. Optional information is contained in 'hints', also encrypted, that +// aids the recipients in processing the message. The resulting WriteCloser +// must be closed after the contents of the file have been written. If config +// is nil, sensible defaults will be used. The signing is done in text mode. +func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { +	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { +	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := algorithm.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + var salt []byte + if signer != nil { + var opsVersion = 3 + if signer.Version == 6 { + opsVersion = signer.Version + } + ops := &packet.OnePassSignature{ + Version: opsVersion, + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if opsVersion == 6 { + ops.KeyFingerprint = signer.Fingerprint + salt, err = packet.SignatureSaltForHash(hash, config.Random()) + if err != nil { + return nil, err + } + ops.Salt = salt + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. 
+ w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType, salt) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 'u', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES256), + uint8(packet.CipherAES128), + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + + // Prefer GCM if everyone supports it + candidateCipherSuites := [][2]uint8{ + {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}, + } + + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + + encryptKeys := make([]Key, len(to)) + + // AEAD is used only if config enables it and every key supports it + aeadSupported := config.AEAD() != nil + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") + } + + primarySelfSignature, _ := to[i].PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.InvalidArgumentError("entity without a self-signature") + } + + if !primarySelfSignature.SEIPDv2 { + aeadSupported = false + } + + candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) + } + + // In the event that the intersection of supported algorithms is empty we 
use the ones + // labelled as MUST that every implementation supports. + if len(candidateCiphers) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3 + candidateCiphers = []uint8{uint8(packet.CipherAES128)} + } + if len(candidateHashes) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos + candidateHashes = []uint8{hashToHashId(crypto.SHA256)} + } + if len(candidateCipherSuites) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 + candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}} + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + aeadCipherSuite := packet.CipherSuite{ + Cipher: packet.CipherFunction(candidateCipherSuites[0][0]), + Mode: packet.AEADMode(candidateCipherSuites[0][1]), + } + + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + var symKey []byte + if aeadSupported { + symKey = make([]byte, aeadCipherSuite.Cipher.KeySize()) + } else { + symKey = make([]byte, cipher.KeySize()) + } + + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config) + if err != nil { + return + } + + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + defaultHashes := candidateHashes[0:1] + primarySelfSignature, _ := signed.PrimarySelfSignature() + if primarySelfSignature == nil { + return nil, errors.StructuralError("signed entity has no self-signature") + } + preferredHashes := primarySelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. 
When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + salt []byte // v6 only + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config) + sig.Hash = s.hashType + sig.Metadata = s.metadata + + if err := sig.SetSalt(s.salt); err != nil { + return err + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature { + sigLifetimeSecs := config.SigLifetime() + return &packet.Signature{ + Version: signer.Version, + SigType: sigType, + PubKeyAlgo: signer.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.KeyId, + IssuerFingerprint: signer.Fingerprint, + Notations: config.Notations(), + SigLifetimeSecs: &sigLifetimeSecs, + } +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. 
+type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { + data = compressed + confAlgo := config.Compression() + if confAlgo == packet.CompressionNone { + return + } + + // Set algorithm labelled as MUST as fallback + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4 + finalAlgo := packet.CompressionNone + // if compression specified by config available we will use it + for _, c := range candidateCompression { + if uint8(confAlgo) == c { + finalAlgo = confAlgo + break + } + } + + if finalAlgo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) + if err != nil { + return + } + } + return data, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go new file mode 100644 index 000000000..38afcc74f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go @@ -0,0 +1,221 @@ +package x25519 + +import ( + "crypto/sha256" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X25519" + aes128KeySize = 16 + // The size of a public or private key in bytes. + KeySize = x25519lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x25519lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x25519lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x25519: invalid key") + } + return nil +} + +// GenerateKey generates a new x25519 key pair. 
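+//
+// For example, an illustrative sketch using crypto/rand as the randomness
+// source (error handling elided):
+//
+//	priv, err := x25519.GenerateKey(rand.Reader)
+//	if err != nil {
+//		// handle error
+//	}
+//	// priv.PublicKey.Point and priv.Secret each hold KeySize (32) bytes.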
+func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x25519lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x25519lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x25519 according to +// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key + // Check that the input static public key has 32 bytes + if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair + err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic) + if err != nil { + return + } + // Compute shared key + ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + return +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x25519 +// private key and ephemeral public key. 
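+//
+// For example, an illustrative sketch (priv, ephemeralPub, and
+// encryptedSessionKey are assumed values, typically recovered from a parsed
+// packet via DecodeFields):
+//
+//	sessionKey, err := x25519.Decrypt(priv, ephemeralPub, encryptedSessionKey)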
+func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x25519lib.Key + // Check that the input ephemeral public key has 32 bytes + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x25519: the public key has the wrong size") + return + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key + ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point") + return + } + // Derive the encryption key from the shared secret + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + return +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes128KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x25519 session key encryption fields as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. +func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + _, err = writer.Write(encryptedSessionKey) + return err +} + +// DecodeField decodes a x25519 session key encryption as +// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 32 octets representing an ephemeral x25519 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. 
+ if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go new file mode 100644 index 000000000..65a082dab --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go @@ -0,0 +1,229 @@ +package x448 + +import ( + "crypto/sha512" + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" + "golang.org/x/crypto/hkdf" +) + +const ( + hkdfInfo = "OpenPGP X448" + aes256KeySize = 32 + // The size of a public or private key in bytes. + KeySize = x448lib.Size +) + +type PublicKey struct { + // Point represents the encoded elliptic curve point of the public key. + Point []byte +} + +type PrivateKey struct { + PublicKey + // Secret represents the secret of the private key. + Secret []byte +} + +// NewPrivateKey creates a new empty private key including the public key. +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +// Validate validates that the provided public key matches +// the private key. +func Validate(pk *PrivateKey) (err error) { + var expectedPublicKey, privateKey x448lib.Key + subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret) + x448lib.KeyGen(&expectedPublicKey, &privateKey) + if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 { + return errors.KeyInvalidError("x448: invalid key") + } + return nil +} + +// GenerateKey generates a new x448 key pair. +func GenerateKey(rand io.Reader) (*PrivateKey, error) { + var privateKey, publicKey x448lib.Key + privateKeyOut := new(PrivateKey) + err := generateKey(rand, &privateKey, &publicKey) + if err != nil { + return nil, err + } + privateKeyOut.PublicKey.Point = publicKey[:] + privateKeyOut.Secret = privateKey[:] + return privateKeyOut, nil +} + +func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error { + maxRounds := 10 + isZero := true + for round := 0; isZero; round++ { + if round == maxRounds { + return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt") + } + _, err := io.ReadFull(rand, privateKey[:]) + if err != nil { + return err + } + isZero = constantTimeIsZero(privateKey[:]) + } + x448lib.KeyGen(publicKey, privateKey) + return nil +} + +// Encrypt encrypts a sessionKey with x448 according to +// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the +// sessionKey has the correct format and padding according to the specification. +func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) { + var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key + // Check that the input static public key has 56 bytes. 
+ if len(publicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, nil, err + } + copy(staticPublic[:], publicKey.Point) + // Generate ephemeral keyPair. + if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil { + return nil, nil, err + } + // Compute shared key. + ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic) + if !ok { + err = errors.KeyInvalidError("x448: the public key is a low order point") + return nil, nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:]) + ephemeralPublicKey = &PublicKey{ + Point: ephemeralPublic[:], + } + // Encrypt the sessionKey with aes key wrapping. + encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey) + if err != nil { + return nil, nil, err + } + return ephemeralPublicKey, encryptedSessionKey, nil +} + +// Decrypt decrypts a session key stored in ciphertext with the provided x448 +// private key and ephemeral public key. +func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) { + var ephemeralPublic, staticPrivate, shared x448lib.Key + // Check that the input ephemeral public key has 56 bytes. + if len(ephemeralPublicKey.Point) != KeySize { + err = errors.KeyInvalidError("x448: the public key has the wrong size") + return nil, err + } + copy(ephemeralPublic[:], ephemeralPublicKey.Point) + subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret) + // Compute shared key. + ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic) + if !ok { + err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point") + return nil, err + } + // Derive the encryption key from the shared secret. + encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:]) + // Decrypt the session key with aes key wrapping. + encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext) + if err != nil { + return nil, err + } + return encodedSessionKey, nil +} + +func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte { + inputKey := make([]byte, 3*KeySize) + // ephemeral public key | recipient public key | shared secret. + subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey) + subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey) + subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret) + hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo)) + encryptionKey := make([]byte, aes256KeySize) + _, _ = io.ReadFull(hkdfReader, encryptionKey) + return encryptionKey +} + +func constantTimeIsZero(bytes []byte) bool { + isZero := byte(0) + for _, b := range bytes { + isZero |= b + } + return isZero == 0 +} + +// ENCODING/DECODING ciphertexts: + +// EncodeFieldsLength returns the length of the ciphertext encoding +// given the encrypted session key. +func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int { + lenCipherFunction := 0 + if !v6 { + lenCipherFunction = 1 + } + return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction +} + +// EncodeField encodes x448 session key encryption fields as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey +// and writes it to writer. 
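+//
+// As an illustrative length check: wrapping a 32-byte session key with AES
+// key wrap produces a 40-byte encryptedSessionKey, so a non-v6 encoding takes
+// 56 (KeySize) + 1 (length octet) + 1 (cipherFunction) + 40 = 98 octets,
+// which is what EncodedFieldsLength reports for those inputs.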
+func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) { + lenAlgorithm := 0 + if !v6 { + lenAlgorithm = 1 + } + if _, err = writer.Write(ephemeralPublicKey.Point); err != nil { + return err + } + if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil { + return err + } + if !v6 { + if _, err = writer.Write([]byte{cipherFunction}); err != nil { + return err + } + } + if _, err = writer.Write(encryptedSessionKey); err != nil { + return err + } + return nil +} + +// DecodeField decodes a x448 session key encryption as +// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey. +func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) { + var buf [1]byte + ephemeralPublicKey = &PublicKey{ + Point: make([]byte, KeySize), + } + // 56 octets representing an ephemeral x448 public key. + if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil { + return nil, nil, 0, err + } + // A one-octet size of the following fields. + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + followingLen := buf[0] + // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet). + if !v6 { + if _, err = io.ReadFull(reader, buf[:]); err != nil { + return nil, nil, 0, err + } + cipherFunction = buf[0] + followingLen -= 1 + } + // The encrypted session key. + encryptedSessionKey = make([]byte, followingLen) + if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil { + return nil, nil, 0, err + } + return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md new file mode 100644 index 000000000..a967b5079 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -0,0 +1,574 @@ +# v1.36.3 (2024-09-27) + +* No change notes available for this release. + +# v1.36.2 (2024-09-25) + +* No change notes available for this release. + +# v1.36.1 (2024-09-23) + +* No change notes available for this release. + +# v1.36.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.35.7 (2024-09-04) + +* No change notes available for this release. + +# v1.35.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.5 (2024-08-22) + +* No change notes available for this release. + +# v1.35.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. 
+ +# v1.34.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.1 (2024-06-17) + +* **Documentation**: Updating SDK example for KMS DeriveSharedSecret API. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-06-13) + +* **Feature**: This feature allows customers to use their keys stored in KMS to derive a shared secret which can then be used to establish a secured channel for communication, provide proof of possession, or establish trust with other parties. + +# v1.32.3 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-05-23) + +* No change notes available for this release. + +# v1.32.0 (2024-05-22) + +* **Feature**: This release includes feature to import customer's asymmetric (RSA, ECC and SM2) and HMAC keys into KMS in China. + +# v1.31.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.1 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.31.0 (2024-04-12) + +* **Feature**: This feature supports the ability to specify a custom rotation period for automatic key rotations, the ability to perform on-demand key rotations, and visibility into your key material rotations. + +# v1.30.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-03-18) + +* **Feature**: Adds the ability to use the default policy name by omitting the policyName parameter in calls to PutKeyPolicy and GetKeyPolicy +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.28.3 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.28.1 (2024-02-15) + +* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field. + +# v1.28.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2024-01-05) + +* **Documentation**: Documentation updates for AWS Key Management Service (KMS). 
+ +# v1.27.8 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.7 (2023-12-20) + +* No change notes available for this release. + +# v1.27.6 (2023-12-15) + +* **Documentation**: Documentation updates for AWS Key Management Service + +# v1.27.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.27.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.27.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.26.3 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.7 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2023-08-01) + +* No change notes available for this release. + +# v1.24.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-07-05) + +* **Feature**: Added Dry Run Feature to cryptographic and cross-account mutating KMS APIs (14 in all). This feature allows users to test their permissions and parameters before making the actual API call. + +# v1.22.2 (2023-06-15) + +* No change notes available for this release. + +# v1.22.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2023-06-05) + +* **Feature**: This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. + +# v1.21.1 (2023-05-04) + +* No change notes available for this release. + +# v1.21.0 (2023-05-01) + +* **Feature**: This release makes the NitroEnclave request parameter Recipient and the response field for CiphertextForRecipient available in AWS SDKs. It also adds the regex pattern for CloudHsmClusterId validation. + +# v1.20.12 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2023-04-20) + +* No change notes available for this release. + +# v1.20.10 (2023-04-10) + +* No change notes available for this release. + +# v1.20.9 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.8 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2023-02-28) + +* **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023. + +# v1.20.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.20.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.20.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2023-01-23) + +* No change notes available for this release. + +# v1.20.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.19.4 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.3 (2022-12-14) + +* No change notes available for this release. + +# v1.19.2 (2022-12-07) + +* **Documentation**: Updated examples and exceptions for External Key Store (XKS). + +# v1.19.1 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2022-11-29.2) + +* **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. 
+ +# v1.18.18 (2022-11-22) + +* No change notes available for this release. + +# v1.18.17 (2022-11-16) + +* No change notes available for this release. + +# v1.18.16 (2022-11-10) + +* No change notes available for this release. + +# v1.18.15 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2022-10-20) + +* No change notes available for this release. + +# v1.18.12 (2022-10-13) + +* No change notes available for this release. + +# v1.18.11 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-08-30) + +* No change notes available for this release. + +# v1.18.6 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-08-22) + +* No change notes available for this release. + +# v1.18.4 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-07-18) + +* **Feature**: Added support for the SM2 KeySpec in China Partition Regions + +# v1.17.5 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-05-17) + +* **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-04-19) + +* **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. 
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-19) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-09-02) + +* **Feature**: API client updated + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-06-04) + +* No change notes available for this release. + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go new file mode 100644 index 000000000..c5e6b2a2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -0,0 +1,911 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "sync/atomic" + "time" +) + +const ServiceID = "KMS" +const ServiceAPIVersion = "2014-11-01" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/kms") +} + +// Client provides the API client to make operations call for AWS Key Management +// Service. +type Client struct { + options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveEndpointResolverV2(&options) + + resolveMeterProvider(&options) + + resolveTracerProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandler(options.HTTPClient) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func 
getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kms", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, 
err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, 
_ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go new file mode 100644 index 000000000..706c00b30 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels the deletion of a KMS key. When this operation succeeds, the key state +// of the KMS key is Disabled . To enable the KMS key, use EnableKey. +// +// For more information about scheduling and canceling deletion of a KMS key, see [Deleting KMS keys] +// in the Key Management Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:CancelKeyDeletion] (key policy) +// +// Related operations: ScheduleKeyDeletion +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:CancelKeyDeletion]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Deleting KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) { + if params == nil { + params = &CancelKeyDeletionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelKeyDeletion", params, optFns, c.addOperationCancelKeyDeletionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelKeyDeletionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelKeyDeletionInput struct { + + // Identifies the KMS key whose deletion is being canceled. 
+ // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type CancelKeyDeletionOutput struct { + + // The Amazon Resource Name ([key ARN] ) of the KMS key whose deletion is canceled. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CancelKeyDeletion"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCancelKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelKeyDeletion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CancelKeyDeletion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go new file mode 100644 index 000000000..8656b61c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -0,0 +1,245 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Connects or reconnects a [custom key store] to its backing key store. For an CloudHSM key store, +// ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. +// For an external key store, ConnectCustomKeyStore connects the key store to the +// external key store proxy that communicates with your external key manager. +// +// The custom key store must be connected before you can create KMS keys in the +// key store or use the KMS keys it contains. You can disconnect and reconnect a +// custom key store at any time. +// +// The connection process for a custom key store can take an extended amount of +// time to complete. This operation starts the connection process, but it does not +// wait for it to complete. When it succeeds, this operation quickly returns an +// HTTP 200 response and a JSON object with no properties. However, this response +// does not indicate that the custom key store is connected. To get the connection +// state of the custom key store, use the DescribeCustomKeyStoresoperation. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// The ConnectCustomKeyStore operation might fail for various reasons. To find the +// reason, use the DescribeCustomKeyStoresoperation and see the ConnectionErrorCode in the response. For +// help interpreting the ConnectionErrorCode , see CustomKeyStoresListEntry. +// +// To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, +// correct the error, use the UpdateCustomKeyStoreoperation if necessary, and then use +// ConnectCustomKeyStore again. +// +// # CloudHSM key store +// +// During the connection process for an CloudHSM key store, KMS finds the CloudHSM +// cluster that is associated with the custom key store, creates the connection +// infrastructure, connects to the cluster, logs into the CloudHSM client as the +// kmsuser CU, and rotates its password. +// +// To connect an CloudHSM key store, its associated CloudHSM cluster must have at +// least one active HSM. To get the number of active HSMs in a cluster, use the [DescribeClusters] +// operation. To add HSMs to the cluster, use the [CreateHsm]operation. Also, the [kmsuser crypto user]kmsuser +// (CU) must not be logged into the cluster. This prevents KMS from using this +// account to log in. 
+// +// If you are having trouble connecting or disconnecting a CloudHSM key store, see [Troubleshooting an CloudHSM key store] +// in the Key Management Service Developer Guide. +// +// # External key store +// +// When you connect an external key store that uses public endpoint connectivity, +// KMS tests its ability to communicate with your external key manager by sending a +// request via the external key store proxy. +// +// When you connect to an external key store that uses VPC endpoint service +// connectivity, KMS establishes the networking elements that it needs to +// communicate with your external key manager via the external key store proxy. +// This includes creating an interface endpoint to the VPC endpoint service and a +// private hosted zone for traffic between KMS and the VPC endpoint service. +// +// To connect an external key store, KMS must be able to connect to the external +// key store proxy, the external key store proxy must be able to communicate with +// your external key manager, and the external key manager must be available for +// cryptographic operations. +// +// If you are having trouble connecting or disconnecting an external key store, +// see [Troubleshooting an external key store]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:ConnectCustomKeyStore] (IAM policy) +// +// # Related operations +// +// # CreateCustomKeyStore +// +// # DeleteCustomKeyStore +// +// # DescribeCustomKeyStores +// +// # DisconnectCustomKeyStore +// +// # UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [DescribeClusters]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [kmsuser crypto user]: https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser +// [Troubleshooting an CloudHSM key store]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html +// [CreateHsm]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html +// [kms:ConnectCustomKeyStore]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Troubleshooting an external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) { + if params == nil { + params = &ConnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ConnectCustomKeyStore", params, optFns, c.addOperationConnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ConnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ConnectCustomKeyStoreInput struct { + + // Enter the key store ID of the custom key store that you want to connect. 
To + // find the ID of a custom key store, use the DescribeCustomKeyStoresoperation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type ConnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ConnectCustomKeyStore"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpConnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opConnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ConnectCustomKeyStore", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go new file mode 100644 index 000000000..84e8a58d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a friendly name for a KMS key. +// +// Adding, deleting, or updating an alias can allow or deny permission to the KMS +// key. For details, see [ABAC for KMS]in the Key Management Service Developer Guide. +// +// You can use an alias to identify a KMS key in the KMS console, in the DescribeKey +// operation and in [cryptographic operations], such as Encrypt and GenerateDataKey. You can also change the KMS key that's +// associated with the alias (UpdateAlias ) or delete the alias (DeleteAlias ) at any time. These +// operations don't affect the underlying KMS key. +// +// You can associate the alias with any customer managed key in the same Amazon +// Web Services Region. Each alias is associated with only one KMS key at a time, +// but a KMS key can have multiple aliases. A valid KMS key is required. You can't +// create an alias without a KMS key. +// +// The alias must be unique in the account and Region, but you can have aliases +// with the same name in different Regions. For detailed information about aliases, +// see [Using aliases]in the Key Management Service Developer Guide. +// +// This operation does not return a response. To get the alias that you created, +// use the ListAliasesoperation. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on an alias in a +// different Amazon Web Services account. +// +// # Required permissions +// +// [kms:CreateAlias] +// - on the alias (IAM policy). +// +// [kms:CreateAlias] +// - on the KMS key (key policy). +// +// For details, see [Controlling access to aliases] in the Key Management Service Developer Guide. +// +// Related operations: +// +// # DeleteAlias +// +// # ListAliases +// +// # UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [Using aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html +// [kms:CreateAlias]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Controlling access to aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access +func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) { + if params == nil { + params = &CreateAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateAlias", params, optFns, c.addOperationCreateAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateAliasInput struct { + + // Specifies the alias name. This value must begin with alias/ followed by a name, + // such as alias/ExampleAlias . + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // The AliasName value must be string of 1-256 characters. It can contain only + // alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). + // The alias name cannot begin with alias/aws/ . The alias/aws/ prefix is reserved + // for [Amazon Web Services managed keys]. + // + // [Amazon Web Services managed keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + // + // This member is required. + AliasName *string + + // Associates the alias with the specified [customer managed key]. The KMS key must be in the same + // Amazon Web Services Region. + // + // A valid key ID is required. If you supply a null or empty string value, this + // operation returns an error. + // + // For help finding the key ID and ARN, see [Finding the Key ID and ARN] in the Key Management Service + // Developer Guide . + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Finding the Key ID and ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type CreateAliasOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateAlias"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go new file mode 100644 index 000000000..548da759f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -0,0 +1,393 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a [custom key store] backed by a key store that you own and manage. When you use a KMS +// key in a custom key store for a cryptographic operation, the cryptographic +// operation is actually performed in your key store using your keys. KMS supports [CloudHSM key stores] +// backed by an [CloudHSM cluster]and [external key stores] backed by an external key store proxy and external key +// manager outside of Amazon Web Services. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// Before you create the custom key store, the required elements must be in place +// and operational. We recommend that you use the test tools that KMS provides to +// verify the configuration your external key store proxy. For details about the +// required elements and verification tests, see [Assemble the prerequisites (for CloudHSM key stores)]or [Assemble the prerequisites (for external key stores)] in the Key Management Service +// Developer Guide. +// +// To create a custom key store, use the following parameters. +// +// - To create an CloudHSM key store, specify the CustomKeyStoreName , +// CloudHsmClusterId , KeyStorePassword , and TrustAnchorCertificate . The +// CustomKeyStoreType parameter is optional for CloudHSM key stores. If you +// include it, set it to the default value, AWS_CLOUDHSM . For help with +// failures, see [Troubleshooting an CloudHSM key store]in the Key Management Service Developer Guide. +// +// - To create an external key store, specify the CustomKeyStoreName and a +// CustomKeyStoreType of EXTERNAL_KEY_STORE . Also, specify values for +// XksProxyConnectivity , XksProxyAuthenticationCredential , XksProxyUriEndpoint +// , and XksProxyUriPath . If your XksProxyConnectivity value is +// VPC_ENDPOINT_SERVICE , specify the XksProxyVpcEndpointServiceName parameter. +// For help with failures, see [Troubleshooting an external key store]in the Key Management Service Developer Guide. +// +// For external key stores: +// +// Some external key managers provide a simpler method for creating an external +// key store. For details, see your external key manager documentation. +// +// When creating an external key store in the KMS console, you can upload a +// JSON-based proxy configuration file with the desired values. You cannot use a +// proxy configuration with the CreateCustomKeyStore operation. However, you can +// use the values in the file to help you determine the correct values for the +// CreateCustomKeyStore parameters. +// +// When the operation completes successfully, it returns the ID of the new custom +// key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore +// operation to connect a new CloudHSM key store to its CloudHSM cluster, or to +// connect a new external key store to the external key store proxy for your +// external key manager. Even if you are not going to use your custom key store +// immediately, you might want to connect it to verify that all settings are +// correct and then disconnect it until you are ready to use it. 
+// +// For help with failures, see [Troubleshooting a custom key store] in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:CreateCustomKeyStore] (IAM policy). +// +// Related operations: +// +// # ConnectCustomKeyStore +// +// # DeleteCustomKeyStore +// +// # DescribeCustomKeyStores +// +// # DisconnectCustomKeyStore +// +// # UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [CloudHSM key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html +// [CloudHSM cluster]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [external key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html +// [Troubleshooting an CloudHSM key store]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html +// [Assemble the prerequisites (for CloudHSM key stores)]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore +// [Assemble the prerequisites (for external key stores)]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements +// [Troubleshooting a custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html +// [Troubleshooting an external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html +// [kms:CreateCustomKeyStore]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) { + if params == nil { + params = &CreateCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCustomKeyStore", params, optFns, c.addOperationCreateCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCustomKeyStoreInput struct { + + // Specifies a friendly name for the custom key store. The name must be unique in + // your Amazon Web Services account and Region. This parameter is required for all + // custom key stores. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // This member is required. + CustomKeyStoreName *string + + // Identifies the CloudHSM cluster for an CloudHSM key store. This parameter is + // required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM . + // + // Enter the cluster ID of any active CloudHSM cluster that is not already + // associated with a custom key store. To find the cluster ID, use the [DescribeClusters]operation. 
+ // + // [DescribeClusters]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + CloudHsmClusterId *string + + // Specifies the type of custom key store. The default value is AWS_CLOUDHSM . + // + // For a custom key store backed by an CloudHSM cluster, omit the parameter or + // enter AWS_CLOUDHSM . For a custom key store backed by an external key manager + // outside of Amazon Web Services, enter EXTERNAL_KEY_STORE . You cannot change + // this property after the key store is created. + CustomKeyStoreType types.CustomKeyStoreType + + // Specifies the kmsuser password for an CloudHSM key store. This parameter is + // required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . + // + // Enter the password of the [kmsuser crypto user (CU) account]kmsuser in the specified CloudHSM cluster. KMS logs + // into the cluster as this user to manage key material on your behalf. + // + // The password must be a string of 7 to 32 characters. Its value is case + // sensitive. + // + // This parameter tells KMS the kmsuser account password; it does not change the + // password in the CloudHSM cluster. + // + // [kmsuser crypto user (CU) account]: https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser + KeyStorePassword *string + + // Specifies the certificate for an CloudHSM key store. This parameter is required + // for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . + // + // Enter the content of the trust anchor certificate for the CloudHSM cluster. + // This is the content of the customerCA.crt file that you created when you [initialized the cluster]. + // + // [initialized the cluster]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html + TrustAnchorCertificate *string + + // Specifies an authentication credential for the external key store proxy (XKS + // proxy). This parameter is required for all custom key stores with a + // CustomKeyStoreType of EXTERNAL_KEY_STORE . + // + // The XksProxyAuthenticationCredential has two required elements: + // RawSecretAccessKey , a secret key, and AccessKeyId , a unique identifier for the + // RawSecretAccessKey . For character requirements, see XksProxyAuthenticationCredentialType. + // + // KMS uses this authentication credential to sign requests to the external key + // store proxy on your behalf. This credential is unrelated to Identity and Access + // Management (IAM) and Amazon Web Services credentials. + // + // This parameter doesn't set or change the authentication credentials on the XKS + // proxy. It just tells KMS the credential that you established on your external + // key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore + // operation to provide the new credential to KMS. + XksProxyAuthenticationCredential *types.XksProxyAuthenticationCredentialType + + // Indicates how KMS communicates with the external key store proxy. This + // parameter is required for custom key stores with a CustomKeyStoreType of + // EXTERNAL_KEY_STORE . + // + // If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT + // . If the external key store proxy uses a Amazon VPC endpoint service for + // communication with KMS, specify VPC_ENDPOINT_SERVICE . For help making this + // choice, see [Choosing a connectivity option]in the Key Management Service Developer Guide. 
+ // + // An Amazon VPC endpoint service keeps your communication with KMS in a private + // address space entirely within Amazon Web Services, but it requires more + // configuration, including establishing a Amazon VPC with multiple subnets, a VPC + // endpoint service, a network load balancer, and a verified private DNS name. A + // public endpoint is simpler to set up, but it might be slower and might not + // fulfill your security requirements. You might consider testing with a public + // endpoint, and then establishing a VPC endpoint service for production tasks. + // Note that this choice does not determine the location of the external key store + // proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within + // the VPC or outside of Amazon Web Services such as in your corporate data center. + // + // [Choosing a connectivity option]: https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity + XksProxyConnectivity types.XksProxyConnectivityType + + // Specifies the endpoint that KMS uses to send requests to the external key store + // proxy (XKS proxy). This parameter is required for custom key stores with a + // CustomKeyStoreType of EXTERNAL_KEY_STORE . + // + // The protocol must be HTTPS. KMS communicates on port 443. Do not specify the + // port in the XksProxyUriEndpoint value. + // + // For external key stores with XksProxyConnectivity value of VPC_ENDPOINT_SERVICE + // , specify https:// followed by the private DNS name of the VPC endpoint service. + // + // For external key stores with PUBLIC_ENDPOINT connectivity, this endpoint must + // be reachable before you create the custom key store. KMS connects to the + // external key store proxy while creating the custom key store. For external key + // stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects when you call the ConnectCustomKeyStore + // operation. + // + // The value of this parameter must begin with https:// . The remainder can contain + // upper and lower case letters (A-Z and a-z), numbers (0-9), dots ( . ), and + // hyphens ( - ). Additional slashes ( / and \ ) are not permitted. + // + // Uniqueness requirements: + // + // - The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique + // in the Amazon Web Services account and Region. + // + // - An external key store with PUBLIC_ENDPOINT connectivity cannot use the same + // XksProxyUriEndpoint value as an external key store with VPC_ENDPOINT_SERVICE + // connectivity in this Amazon Web Services Region. + // + // - Each external key store with VPC_ENDPOINT_SERVICE connectivity must have its + // own private DNS name. The XksProxyUriEndpoint value for external key stores + // with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must be unique in + // the Amazon Web Services account and Region. + XksProxyUriEndpoint *string + + // Specifies the base path to the proxy APIs for this external key store. To find + // this value, see the documentation for your external key store proxy. This + // parameter is required for all custom key stores with a CustomKeyStoreType of + // EXTERNAL_KEY_STORE . + // + // The value must start with / and must end with /kms/xks/v1 where v1 represents + // the version of the KMS external key store proxy API. This path can include an + // optional prefix between the required elements such as /prefix/kms/xks/v1 . 
+ // + // Uniqueness requirements: + // + // - The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique + // in the Amazon Web Services account and Region. + XksProxyUriPath *string + + // Specifies the name of the Amazon VPC endpoint service for interface endpoints + // that is used to communicate with your external key store proxy (XKS proxy). This + // parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE + // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE . + // + // The Amazon VPC endpoint service must [fulfill all requirements] for use with an external key store. + // + // Uniqueness requirements: + // + // - External key stores with VPC_ENDPOINT_SERVICE connectivity can share an + // Amazon VPC, but each external key store must have its own VPC endpoint service + // and private DNS name. + // + // [fulfill all requirements]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements + XksProxyVpcEndpointServiceName *string + + noSmithyDocumentSerde +} + +type CreateCustomKeyStoreOutput struct { + + // A unique identifier for the new custom key store. + CustomKeyStoreId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateCustomKeyStore"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return 
err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go new file mode 100644 index 000000000..d30a5e80e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -0,0 +1,351 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds a grant to a KMS key. +// +// A grant is a policy instrument that allows Amazon Web Services principals to +// use KMS keys in cryptographic operations. It also can allow them to view a KMS +// key (DescribeKey ) and create and manage grants. When authorizing access to a KMS key, +// grants are considered along with key policies and IAM policies. Grants are often +// used for temporary permissions because you can create one, use its permissions, +// and delete it without changing your key policies or IAM policies. +// +// For detailed information about grants, including grant terminology, see [Grants in KMS] in the +// Key Management Service Developer Guide . For examples of working with grants in +// several programming languages, see [Programming grants]. +// +// The CreateGrant operation returns a GrantToken and a GrantId . +// +// - When you create, retire, or revoke a grant, there might be a brief delay, +// usually less than five minutes, until the grant is available throughout KMS. +// This state is known as eventual consistency. Once the grant has achieved +// eventual consistency, the grantee principal can use the permissions in the grant +// without identifying the grant. +// +// However, to use the permissions in the grant immediately, use the GrantToken +// +// that CreateGrant returns. For details, see [Using a grant token]in the Key Management Service +// Developer Guide . +// +// - The CreateGrant operation also returns a GrantId . You can use the GrantId +// and a key identifier to identify the grant in the RetireGrantand RevokeGrantoperations. To find the +// grant ID, use the ListGrantsor ListRetirableGrantsoperations. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. 
+// +// Required permissions: [kms:CreateGrant] (key policy) +// +// Related operations: +// +// # ListGrants +// +// # ListRetirableGrants +// +// # RetireGrant +// +// # RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Programming grants]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Grants in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html +// [kms:CreateGrant]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// +// [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token +func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) { + if params == nil { + params = &CreateGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateGrant", params, optFns, c.addOperationCreateGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateGrantInput struct { + + // The identity that gets the permissions specified in the grant. + // + // To specify the grantee principal, use the Amazon Resource Name (ARN) of an + // Amazon Web Services principal. Valid principals include Amazon Web Services + // accounts, IAM users, IAM roles, federated users, and assumed role users. For + // help with the ARN syntax for a principal, see [IAM ARNs]in the Identity and Access + // Management User Guide . + // + // [IAM ARNs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + // + // This member is required. + GranteePrincipal *string + + // Identifies the KMS key for the grant. The grant gives principals permission to + // use this KMS key. + // + // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // A list of operations that the grant permits. + // + // This list must include only operations that are permitted in a grant. Also, the + // operation must be supported on the KMS key. For example, you cannot create a + // grant for a symmetric encryption KMS key that allows the Signoperation, or a grant + // for an asymmetric KMS key that allows the GenerateDataKeyoperation. If you try, KMS returns a + // ValidationError exception. For details, see [Grant operations] in the Key Management Service + // Developer Guide. + // + // [Grant operations]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + // + // This member is required. + Operations []types.GrantOperation + + // Specifies a grant constraint. + // + // Do not include confidential or sensitive information in this field. 
This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // KMS supports the EncryptionContextEquals and EncryptionContextSubset grant + // constraints, which allow the permissions in the grant only when the encryption + // context in the request matches ( EncryptionContextEquals ) or includes ( + // EncryptionContextSubset ) the encryption context specified in the constraint. + // + // The encryption context grant constraints are supported only on [grant operations] that include an + // EncryptionContext parameter, such as cryptographic operations on symmetric + // encryption KMS keys. Grants with grant constraints can include the DescribeKeyand RetireGrant + // operations, but the constraint doesn't apply to these operations. If a grant + // with a grant constraint includes the CreateGrant operation, the constraint + // requires that any grants created with the CreateGrant permission have an + // equally strict or stricter encryption context constraint. + // + // You cannot use an encryption context grant constraint for cryptographic + // operations with asymmetric KMS keys or HMAC KMS keys. Operations with these keys + // don't support an encryption context. + // + // Each constraint value can include up to 8 encryption context pairs. The + // encryption context value in each constraint cannot exceed 384 characters. For + // information about grant constraints, see [Using grant constraints]in the Key Management Service + // Developer Guide. For more information about encryption context, see [Encryption context]in the Key + // Management Service Developer Guide . + // + // [grant operations]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations + // [Using grant constraints]: https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + Constraints *types.GrantConstraints + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // A friendly name for the grant. Use this value to prevent the unintended + // creation of duplicate grants when retrying this request. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // When this value is absent, all CreateGrant requests result in a new grant with + // a unique GrantId even if all the supplied parameters are identical. This can + // result in unintended duplicates when you retry the CreateGrant request. 
+ // + // When this value is present, you can retry a CreateGrant request with identical + // parameters; if the grant already exists, the original GrantId is returned + // without creating a new grant. Note that the returned grant token is unique with + // every CreateGrant request, even when a duplicate GrantId is returned. All grant + // tokens for the same grant ID can be used interchangeably. + Name *string + + // The principal that has permission to use the RetireGrant operation to retire the grant. + // + // To specify the principal, use the [Amazon Resource Name (ARN)] of an Amazon Web Services principal. Valid + // principals include Amazon Web Services accounts, IAM users, IAM roles, federated + // users, and assumed role users. For help with the ARN syntax for a principal, see + // [IAM ARNs]in the Identity and Access Management User Guide . + // + // The grant determines the retiring principal. Other principals might have + // permission to retire the grant or revoke the grant. For details, see RevokeGrantand [Retiring and revoking grants] in + // the Key Management Service Developer Guide. + // + // [IAM ARNs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [Retiring and revoking grants]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete + RetiringPrincipal *string + + noSmithyDocumentSerde +} + +type CreateGrantOutput struct { + + // The unique identifier for the grant. + // + // You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation. + GrantId *string + + // The grant token. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateGrant"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go new file mode 100644 index 000000000..259c5fce6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -0,0 +1,599 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
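Editorial note: to make the CreateGrant surface documented above concrete, here is a minimal sketch — again outside the vendored diff, with placeholder principal and key identifiers — that grants an IAM role two of the operations accepted in the `Operations []types.GrantOperation` field.

```
// Illustrative sketch only; not part of the vendored patch. ARNs and IDs are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Allow an IAM role to call Decrypt and GenerateDataKey with one KMS key,
	// mirroring the GranteePrincipal / KeyId / Operations fields documented above.
	out, err := client.CreateGrant(ctx, &kms.CreateGrantInput{
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/example-role"),
		KeyId:            aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		Operations: []types.GrantOperation{
			types.GrantOperationDecrypt,
			types.GrantOperationGenerateDataKey,
		},
	})
	if err != nil {
		log.Fatalf("create grant: %v", err)
	}

	// GrantId identifies the grant for RetireGrant/RevokeGrant; GrantToken can be
	// presented immediately, before the grant reaches eventual consistency.
	fmt.Println("grant id:", aws.ToString(out.GrantId))
	fmt.Println("grant token:", aws.ToString(out.GrantToken))
}
```

The split between the returned GrantId and GrantToken matches the doc comment above: the token bridges the eventual-consistency window, while the ID is what RetireGrant and RevokeGrant take.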
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a unique customer managed [KMS key] in your Amazon Web Services account and +// Region. You can use a KMS key in cryptographic operations, such as encryption +// and signing. Some Amazon Web Services services let you use KMS keys that you +// create and manage to protect your service resources. +// +// A KMS key is a logical representation of a cryptographic key. In addition to +// the key material used in cryptographic operations, a KMS key includes metadata, +// such as the key ID, key policy, creation date, description, and key state. For +// details, see [Managing keys]in the Key Management Service Developer Guide +// +// Use the parameters of CreateKey to specify the type of KMS key, the source of +// its key material, its key policy, description, tags, and other properties. +// +// KMS has replaced the term customer master key (CMK) with KMS key and KMS key. +// The concept has not changed. To prevent breaking changes, KMS is keeping some +// variations of this term. +// +// To create different types of KMS keys, use the following guidance: +// +// Symmetric encryption KMS key By default, CreateKey creates a symmetric +// encryption KMS key with key material that KMS generates. This is the basic and +// most widely used type of KMS key, and provides the best performance. +// +// To create a symmetric encryption KMS key, you don't need to specify any +// parameters. The default value for KeySpec , SYMMETRIC_DEFAULT , the default +// value for KeyUsage , ENCRYPT_DECRYPT , and the default value for Origin , +// AWS_KMS , create a symmetric encryption KMS key with KMS key material. +// +// If you need a key for basic encryption and decryption or you are creating a KMS +// key to protect your resources in an Amazon Web Services service, create a +// symmetric encryption KMS key. The key material in a symmetric encryption key +// never leaves KMS unencrypted. You can use a symmetric encryption KMS key to +// encrypt and decrypt data up to 4,096 bytes, but they are typically used to +// generate data keys and data keys pairs. For details, see GenerateDataKeyand GenerateDataKeyPair. +// +// Asymmetric KMS keys To create an asymmetric KMS key, use the KeySpec parameter +// to specify the type of key material in the KMS key. Then, use the KeyUsage +// parameter to determine whether the KMS key will be used to encrypt and decrypt +// or sign and verify. You can't change these properties after the KMS key is +// created. +// +// Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or +// an SM2 key pair (China Regions only). The private key in an asymmetric KMS key +// never leaves KMS unencrypted. However, you can use the GetPublicKeyoperation to download +// the public key so it can be used outside of KMS. Each KMS key can have only one +// key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data +// or sign and verify messages (but not both). KMS keys with NIST-recommended ECC +// key pairs can be used to sign and verify messages or derive shared secrets (but +// not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify +// messages. 
KMS keys with SM2 key pairs (China Regions only) can be used to either +// encrypt and decrypt data, sign and verify messages, or derive shared secrets +// (you must choose one key usage type). For information about asymmetric KMS keys, +// see [Asymmetric KMS keys]in the Key Management Service Developer Guide. +// +// HMAC KMS key To create an HMAC KMS key, set the KeySpec parameter to a key spec +// value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC +// . You must set the key usage even though GENERATE_VERIFY_MAC is the only valid +// key usage value for HMAC KMS keys. You can't change these properties after the +// KMS key is created. +// +// HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use +// HMAC keys to generate (GenerateMac ) and verify (VerifyMac ) HMAC codes for messages up to 4096 +// bytes. +// +// Multi-Region primary keys Imported key material To create a multi-Region +// primary key in the local Amazon Web Services Region, use the MultiRegion +// parameter with a value of True . To create a multi-Region replica key, that is, +// a KMS key with the same key ID and key material as a primary key, but in a +// different Amazon Web Services Region, use the ReplicateKeyoperation. To change a replica +// key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegionoperation. +// +// You can create multi-Region KMS keys for all supported KMS key types: symmetric +// encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and +// asymmetric signing KMS keys. You can also create multi-Region keys with imported +// key material. However, you can't create multi-Region keys in a custom key store. +// +// This operation supports multi-Region keys, an KMS feature that lets you create +// multiple interoperable KMS keys in different Amazon Web Services Regions. +// Because these KMS keys have the same key ID, key material, and other metadata, +// you can use them interchangeably to encrypt data in one Amazon Web Services +// Region and decrypt it in a different Amazon Web Services Region without +// re-encrypting the data or making a cross-Region call. For more information about +// multi-Region keys, see [Multi-Region keys in KMS]in the Key Management Service Developer Guide. +// +// To import your own key material into a KMS key, begin by creating a KMS key +// with no key material. To do this, use the Origin parameter of CreateKey with a +// value of EXTERNAL . Next, use GetParametersForImport operation to get a public key and import token. +// Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterialwith your +// import token to import the key material. For step-by-step instructions, see [Importing Key Material]in +// the Key Management Service Developer Guide . +// +// You can import key material into KMS keys of all supported KMS key types: +// symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, +// and asymmetric signing KMS keys. You can also create multi-Region keys with +// imported key material. However, you can't import key material into a KMS key in +// a custom key store. +// +// To create a multi-Region primary key with imported key material, use the Origin +// parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter +// with a value of True . To create replicas of the multi-Region primary key, use +// the ReplicateKeyoperation. For instructions, see [Importing key material into multi-Region keys]. 
For more information about multi-Region +// keys, see [Multi-Region keys in KMS]in the Key Management Service Developer Guide. +// +// Custom key store A [custom key store] lets you protect your Amazon Web Services resources using +// keys in a backing key store that you own and manage. When you request a +// cryptographic operation with a KMS key in a custom key store, the operation is +// performed in the backing key store using its cryptographic keys. +// +// KMS supports [CloudHSM key stores] backed by an CloudHSM cluster and [external key stores] backed by an external key +// manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM +// key store, KMS generates an encryption key in the CloudHSM cluster and +// associates it with the KMS key. When you create a KMS key in an external key +// store, you specify an existing encryption key in the external key manager. +// +// Some external key managers provide a simpler method for creating a KMS key in +// an external key store. For details, see your external key manager documentation. +// +// Before you create a KMS key in a custom key store, the ConnectionState of the +// key store must be CONNECTED . To connect the custom key store, use the ConnectCustomKeyStore +// operation. To find the ConnectionState , use the DescribeCustomKeyStores operation. +// +// To create a KMS key in a custom key store, use the CustomKeyStoreId . Use the +// default KeySpec value, SYMMETRIC_DEFAULT , and the default KeyUsage value, +// ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is +// supported in a custom key store. +// +// To create a KMS key in an [CloudHSM key store], use the Origin parameter with a value of +// AWS_CLOUDHSM . The CloudHSM cluster that is associated with the custom key store +// must have at least two active HSMs in different Availability Zones in the Amazon +// Web Services Region. +// +// To create a KMS key in an [external key store], use the Origin parameter with a value of +// EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing +// external key. +// +// Some external key managers provide a simpler method for creating a KMS key in +// an external key store. For details, see your external key manager documentation. +// +// Cross-account use: No. You cannot use this operation to create a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:CreateKey] (IAM policy). To use the Tags parameter, [kms:TagResource] (IAM policy). +// For examples and information about related permissions, see [Allow a user to create KMS keys]in the Key +// Management Service Developer Guide. +// +// Related operations: +// +// # DescribeKey +// +// # ListKeys +// +// # ScheduleKeyDeletion +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [CloudHSM key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html +// [external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html +// [external key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html +// [Asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [Multi-Region keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html +// [Managing keys]: https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html +// [KMS key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys +// [Allow a user to create KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [kms:TagResource]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [CloudHSM key store]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html +// [kms:CreateKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Importing key material into multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html +// [Importing Key Material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) { + if params == nil { + params = &CreateKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateKey", params, optFns, c.addOperationCreateKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateKeyInput struct { + + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. + // + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, see [Default key policy] in the Key Management Service Developer Guide. + // + // Use this parameter only when you intend to prevent the principal that is making + // the request from making a subsequent [PutKeyPolicy]request on the KMS key. + // + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + // [PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html + BypassPolicyLockoutSafetyCheck bool + + // Creates the KMS key in the specified [custom key store]. The ConnectionState of the custom key + // store must be CONNECTED . To find the CustomKeyStoreID and ConnectionState use + // the DescribeCustomKeyStoresoperation. + // + // This parameter is valid only for symmetric encryption KMS keys in a single + // Region. You cannot create any other type of KMS key in a custom key store. + // + // When you create a KMS key in an CloudHSM key store, KMS generates a + // non-exportable 256-bit symmetric key in its associated CloudHSM cluster and + // associates it with the KMS key. 
When you create a KMS key in an external key + // store, you must use the XksKeyId parameter to specify an external key that + // serves as key material for the KMS key. + // + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + CustomKeyStoreId *string + + // Instead, use the KeySpec parameter. + // + // The KeySpec and CustomerMasterKeySpec parameters work the same way. Only the + // names differ. We recommend that you use KeySpec parameter in your code. + // However, to avoid breaking changes, KMS supports both parameters. + // + // Deprecated: This parameter has been deprecated. Instead, use the KeySpec + // parameter. + CustomerMasterKeySpec types.CustomerMasterKeySpec + + // A description of the KMS key. Use a description that helps you decide whether + // the KMS key is appropriate for a task. The default value is an empty string (no + // description). + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // To set or change the description after the key is created, use UpdateKeyDescription. + Description *string + + // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT , + // creates a KMS key with a 256-bit AES-GCM key that is used for encryption and + // decryption, except in China Regions, where it creates a 128-bit symmetric key + // that uses SM4 encryption. For help choosing a key spec for your KMS key, see [Choosing a KMS key type]in + // the Key Management Service Developer Guide . + // + // The KeySpec determines whether the KMS key contains a symmetric key or an + // asymmetric key pair. It also determines the algorithms that the KMS key + // supports. You can't change the KeySpec after the KMS key is created. To further + // restrict the algorithms that can be used with the KMS key, use a condition key + // in its key policy or IAM policy. For more information, see [kms:EncryptionAlgorithm], [kms:MacAlgorithm] or [kms:Signing Algorithm] in the Key + // Management Service Developer Guide . + // + // [Amazon Web Services services that are integrated with KMS]use symmetric encryption KMS keys to protect your data. These services do not + // support asymmetric KMS keys or HMAC KMS keys. + // + // KMS supports the following key specs for KMS keys: + // + // - Symmetric encryption key (default) + // + // - SYMMETRIC_DEFAULT + // + // - HMAC keys (symmetric) + // + // - HMAC_224 + // + // - HMAC_256 + // + // - HMAC_384 + // + // - HMAC_512 + // + // - Asymmetric RSA key pairs (encryption and decryption -or- signing and + // verification) + // + // - RSA_2048 + // + // - RSA_3072 + // + // - RSA_4096 + // + // - Asymmetric NIST-recommended elliptic curve key pairs (signing and + // verification -or- deriving shared secrets) + // + // - ECC_NIST_P256 (secp256r1) + // + // - ECC_NIST_P384 (secp384r1) + // + // - ECC_NIST_P521 (secp521r1) + // + // - Other asymmetric elliptic curve key pairs (signing and verification) + // + // - ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. 
+ // + // - SM2 key pairs (encryption and decryption -or- signing and verification -or- + // deriving shared secrets) + // + // - SM2 (China Regions only) + // + // [kms:EncryptionAlgorithm]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm + // [kms:Signing Algorithm]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm + // [kms:MacAlgorithm]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm + // [Choosing a KMS key type]: https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose + // [Amazon Web Services services that are integrated with KMS]: http://aws.amazon.com/kms/features/#AWS_Service_Integration + KeySpec types.KeySpec + + // Determines the [cryptographic operations] for which you can use the KMS key. The default value is + // ENCRYPT_DECRYPT . This parameter is optional when you are creating a symmetric + // encryption KMS key; otherwise, it is required. You can't change the KeyUsage + // value after the KMS key is created. + // + // Select only one valid value. + // + // - For symmetric encryption KMS keys, omit the parameter or specify + // ENCRYPT_DECRYPT . + // + // - For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC . + // + // - For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or + // SIGN_VERIFY . + // + // - For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, + // specify SIGN_VERIFY or KEY_AGREEMENT . + // + // - For asymmetric KMS keys with ECC_SECG_P256K1 key pairs specify SIGN_VERIFY . + // + // - For asymmetric KMS keys with SM2 key pairs (China Regions only), specify + // ENCRYPT_DECRYPT , SIGN_VERIFY , or KEY_AGREEMENT . + // + // [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + KeyUsage types.KeyUsageType + + // Creates a multi-Region primary key that you can replicate into other Amazon Web + // Services Regions. You cannot change this value after you create the KMS key. + // + // For a multi-Region key, set this parameter to True . For a single-Region KMS + // key, omit this parameter or set it to False . The default value is False . + // + // This operation supports multi-Region keys, an KMS feature that lets you create + // multiple interoperable KMS keys in different Amazon Web Services Regions. + // Because these KMS keys have the same key ID, key material, and other metadata, + // you can use them interchangeably to encrypt data in one Amazon Web Services + // Region and decrypt it in a different Amazon Web Services Region without + // re-encrypting the data or making a cross-Region call. For more information about + // multi-Region keys, see [Multi-Region keys in KMS]in the Key Management Service Developer Guide. + // + // This value creates a primary key, not a replica. To create a replica key, use + // the ReplicateKeyoperation. + // + // You can create a symmetric or asymmetric multi-Region key, and you can create a + // multi-Region key with imported key material. However, you cannot create a + // multi-Region key in a custom key store. + // + // [Multi-Region keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html + MultiRegion *bool + + // The source of the key material for the KMS key. You cannot change the origin + // after you create the KMS key. 
The default is AWS_KMS , which means that KMS + // creates the key material. + // + // To [create a KMS key with no key material] (for imported key material), set this value to EXTERNAL . For more + // information about importing key material into KMS, see [Importing Key Material]in the Key Management + // Service Developer Guide. The EXTERNAL origin value is valid only for symmetric + // KMS keys. + // + // To [create a KMS key in an CloudHSM key store] and create its key material in the associated CloudHSM cluster, set this + // value to AWS_CLOUDHSM . You must also use the CustomKeyStoreId parameter to + // identify the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT . + // + // To [create a KMS key in an external key store], set this value to EXTERNAL_KEY_STORE . You must also use the + // CustomKeyStoreId parameter to identify the external key store and the XksKeyId + // parameter to identify the associated external key. The KeySpec value must be + // SYMMETRIC_DEFAULT . + // + // [create a KMS key in an external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html + // [create a KMS key in an CloudHSM key store]: https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html + // [Importing Key Material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + // [create a KMS key with no key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html + Origin types.OriginType + + // The key policy to attach to the KMS key. + // + // If you provide a key policy, it must meet the following criteria: + // + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see [Default key policy]in the Key Management Service + // Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck + // to true.) + // + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. For more information, see [Changes that I make are not always immediately visible]in the Amazon Web + // Services Identity and Access Management User Guide. + // + // If you do not provide a key policy, KMS attaches a default key policy to the + // KMS key. For more information, see [Default key policy]in the Key Management Service Developer + // Guide. + // + // The key policy size quota is 32 kilobytes (32768 bytes). + // + // For help writing and formatting a JSON policy document, see the [IAM JSON Policy Reference] in the + // Identity and Access Management User Guide . + // + // [IAM JSON Policy Reference]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + // [Changes that I make are not always immediately visible]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency + Policy *string + + // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key + // when it is created. 
To tag an existing KMS key, use the TagResourceoperation. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see [ABAC for KMS]in the Key Management Service Developer Guide. + // + // To use this parameter, you must have [kms:TagResource] permission in an IAM policy. + // + // Each tag consists of a tag key and a tag value. Both the tag key and the tag + // value are required, but the tag value can be an empty (null) string. You cannot + // have more than one tag on a KMS key with the same tag key. If you specify an + // existing tag key with a different tag value, KMS replaces the current tag value + // with the specified one. + // + // When you add tags to an Amazon Web Services resource, Amazon Web Services + // generates a cost allocation report with usage and costs aggregated by tags. Tags + // can also be used to control access to a KMS key. For details, see [Tagging Keys]. + // + // [kms:TagResource]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + // [Tagging Keys]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + // [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + Tags []types.Tag + + // Identifies the [external key] that serves as key material for the KMS key in an [external key store]. Specify the + // ID that the [external key store proxy]uses to refer to the external key. For help, see the documentation + // for your external key store proxy. + // + // This parameter is required for a KMS key with an Origin value of + // EXTERNAL_KEY_STORE . It is not valid for KMS keys with any other Origin value. + // + // The external key must be an existing 256-bit AES symmetric encryption key + // hosted outside of Amazon Web Services in an external key manager associated with + // the external key store specified by the CustomKeyStoreId parameter. This key + // must be enabled and configured to perform encryption and decryption. Each KMS + // key in an external key store must use a different external key. For details, see + // [Requirements for a KMS key in an external key store]in the Key Management Service Developer Guide. + // + // Each KMS key in an external key store is associated two backing keys. One is + // key material that KMS generates. The other is the external key specified by this + // parameter. When you use the KMS key in an external key store to encrypt data, + // the encryption operation is performed first by KMS using the KMS key material, + // and then by the external key manager using the specified external key, a process + // known as double encryption. For details, see [Double encryption]in the Key Management Service + // Developer Guide. 
+ // + // [external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html + // [Double encryption]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption + // [external key]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key + // [Requirements for a KMS key in an external key store]: https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements + // [external key store proxy]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy + XksKeyId *string + + noSmithyDocumentSerde +} + +type CreateKeyOutput struct { + + // Metadata associated with the KMS key. + KeyMetadata *types.KeyMetadata + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go new file mode 100644 index 000000000..93d64f580 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -0,0 +1,362 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decrypts ciphertext that was encrypted by a KMS key using any of the following +// operations: +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// # GenerateDataKeyWithoutPlaintext +// +// # GenerateDataKeyPairWithoutPlaintext +// +// You can use this operation to decrypt ciphertext that was encrypted under a +// symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS +// key is asymmetric, you must specify the KMS key and the encryption algorithm +// that was used to encrypt the ciphertext. For information about asymmetric KMS +// keys, see [Asymmetric KMS keys]in the Key Management Service Developer Guide. +// +// The Decrypt operation also decrypts ciphertext that was encrypted outside of +// KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt +// symmetric ciphertext produced by other libraries, such as the [Amazon Web Services Encryption SDK]or [Amazon S3 client-side encryption]. These +// libraries return a ciphertext format that is incompatible with KMS. +// +// If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId +// parameter is optional. KMS can get this information from metadata that it adds +// to the symmetric ciphertext blob. This feature adds durability to your +// implementation by ensuring that authorized users can decrypt ciphertext decades +// after it was encrypted, even if they've lost track of the key ID. However, +// specifying the KMS key is always recommended as a best practice. When you use +// the KeyId parameter to specify a KMS key, KMS only uses the KMS key you +// specify. If the ciphertext was encrypted under a different KMS key, the Decrypt +// operation fails. This practice ensures that you use the KMS key that you intend. +// +// Whenever possible, use key policies to give users permission to call the Decrypt +// operation on a particular KMS key, instead of using &IAM; policies. Otherwise, +// you might create an &IAM; policy that gives the user Decrypt permission on all +// KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in +// other accounts if the key policy for the cross-account KMS key permits it. If +// you must use an IAM policy for Decrypt permissions, limit the user to +// particular KMS keys or particular trusted accounts. For details, see [Best practices for IAM policies]in the Key +// Management Service Developer Guide. 
+// +// Decrypt also supports [Amazon Web Services Nitro Enclaves], which provide an isolated compute environment in Amazon +// EC2. To call Decrypt for a Nitro enclave, use the [Amazon Web Services Nitro Enclaves SDK] or any Amazon Web Services +// SDK. Use the Recipient parameter to provide the attestation document for the +// enclave. Instead of the plaintext data, the response includes the plaintext data +// encrypted with the public key from the attestation document ( +// CiphertextForRecipient ). For information about the interaction between KMS and +// Amazon Web Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer +// Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in +// a different Amazon Web Services account, specify the key ARN or the alias ARN of +// the KMS key. +// +// Required permissions: [kms:Decrypt] (key policy) +// +// Related operations: +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// # ReEncrypt +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Amazon Web Services Encryption SDK]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/ +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:Decrypt]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [Amazon Web Services Nitro Enclaves]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html +// [Amazon S3 client-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html +// [Best practices for IAM policies]: https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices +// [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk +func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) { + if params == nil { + params = &DecryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Decrypt", params, optFns, c.addOperationDecryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecryptInput struct { + + // Ciphertext to be decrypted. The blob includes metadata. + // + // This member is required. + CiphertextBlob []byte + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. 
+ // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption algorithm that will be used to decrypt the ciphertext. + // Specify the same algorithm that was used to encrypt the data. If you specify a + // different algorithm, the Decrypt operation fails. + // + // This parameter is required only when the ciphertext was encrypted under an + // asymmetric KMS key. The default value, SYMMETRIC_DEFAULT , represents the only + // supported algorithm that is valid for symmetric encryption KMS keys. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use when decrypting the data. An encryption + // context is valid only for [cryptographic operations]with a symmetric encryption KMS key. The standard + // asymmetric encryption algorithms and HMAC algorithms that KMS uses do not + // support an encryption context. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // Specifies the KMS key that KMS uses to decrypt the ciphertext. + // + // Enter a key ID of the KMS key that was used to encrypt the ciphertext. If you + // identify a different KMS key, the Decrypt operation throws an + // IncorrectKeyException . + // + // This parameter is required only when the ciphertext was encrypted under an + // asymmetric KMS key. If you used a symmetric encryption KMS key, KMS can get the + // KMS key from metadata that it adds to the symmetric ciphertext blob. However, it + // is always recommended as a best practice. This practice ensures that you use the + // KMS key that you intend. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + KeyId *string + + // A signed [attestation document] from an Amazon Web Services Nitro enclave and the encryption + // algorithm to use with the enclave's public key. The only valid encryption + // algorithm is RSAES_OAEP_SHA_256 . + // + // This parameter only supports attestation documents for Amazon Web Services + // Nitro Enclaves. To include this parameter, use the [Amazon Web Services Nitro Enclaves SDK]or any Amazon Web Services + // SDK. + // + // When you use this parameter, instead of returning the plaintext data, KMS + // encrypts the plaintext data with the public key in the attestation document, and + // returns the resulting ciphertext in the CiphertextForRecipient field in the + // response. This ciphertext can be decrypted only with the private key in the + // enclave. The Plaintext field in the response is null or empty. + // + // For information about the interaction between KMS and Amazon Web Services Nitro + // Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [attestation document]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-concepts.html#term-attestdoc + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + // [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk + Recipient *types.RecipientInfo + + noSmithyDocumentSerde +} + +type DecryptOutput struct { + + // The plaintext data encrypted with the public key in the attestation document. + // + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + CiphertextForRecipient []byte + + // The encryption algorithm that was used to decrypt the ciphertext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name ([key ARN] ) of the KMS key that was used to decrypt the + // ciphertext. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + // + // If the response includes the CiphertextForRecipient field, the Plaintext field + // is null or empty. + Plaintext []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Decrypt"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDecryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Decrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go new file mode 100644 index 000000000..58af22e36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
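A minimal sketch of how a caller might use the generated Decrypt operation above, pinning the key ID and encryption context as the documentation recommends. The package name, helper function, key ARN, and context values are hypothetical.

```go
// Hypothetical helper around the generated Decrypt operation.
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// decryptWithContext decrypts a blob produced by Encrypt. Passing KeyId is
// optional for symmetric ciphertexts but recommended; the encryption context
// must exactly match the one supplied at encryption time.
func decryptWithContext(ctx context.Context, client *kms.Client, blob []byte, keyARN string) ([]byte, error) {
	out, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: blob,
		KeyId:          aws.String(keyARN),
		EncryptionContext: map[string]string{
			"purpose": "example", // placeholder context pair
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}
```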
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified alias. +// +// Adding, deleting, or updating an alias can allow or deny permission to the KMS +// key. For details, see [ABAC for KMS]in the Key Management Service Developer Guide. +// +// Because an alias is not a property of a KMS key, you can delete and change the +// aliases of a KMS key without affecting the KMS key. Also, aliases do not appear +// in the response from the DescribeKeyoperation. To get the aliases of all KMS keys, use the ListAliases +// operation. +// +// Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias +// to delete the current alias and CreateAliasto create a new alias. To associate an existing +// alias with a different KMS key, call UpdateAlias. +// +// Cross-account use: No. You cannot perform this operation on an alias in a +// different Amazon Web Services account. +// +// # Required permissions +// +// [kms:DeleteAlias] +// - on the alias (IAM policy). +// +// [kms:DeleteAlias] +// - on the KMS key (key policy). +// +// For details, see [Controlling access to aliases] in the Key Management Service Developer Guide. +// +// Related operations: +// +// # CreateAlias +// +// # ListAliases +// +// # UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html +// [kms:DeleteAlias]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Controlling access to aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access +func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { + if params == nil { + params = &DeleteAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteAlias", params, optFns, c.addOperationDeleteAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteAliasInput struct { + + // The alias to be deleted. The alias name must begin with alias/ followed by the + // alias name, such as alias/ExampleAlias . + // + // This member is required. + AliasName *string + + noSmithyDocumentSerde +} + +type DeleteAliasOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteAlias"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go new file mode 100644 index 000000000..b773858b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
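A short sketch of the delete-then-recreate alias flow described in the DeleteAlias documentation above; the package, function name, and alias values are hypothetical, and UpdateAlias remains the simpler option for repointing an existing alias.

```go
// Hypothetical helper around the generated DeleteAlias/CreateAlias operations.
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// repointAlias deletes an alias and recreates it against a different KMS key.
func repointAlias(ctx context.Context, client *kms.Client, alias, targetKeyID string) error {
	if _, err := client.DeleteAlias(ctx, &kms.DeleteAliasInput{
		AliasName: aws.String(alias), // e.g. "alias/ExampleAlias"
	}); err != nil {
		return err
	}
	_, err := client.CreateAlias(ctx, &kms.CreateAliasInput{
		AliasName:   aws.String(alias),
		TargetKeyId: aws.String(targetKeyID),
	})
	return err
}
```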
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a [custom key store]. This operation does not affect any backing elements of the custom +// key store. It does not delete the CloudHSM cluster that is associated with an +// CloudHSM key store, or affect any users or keys in the cluster. For an external +// key store, it does not affect the external key store proxy, external key +// manager, or any external keys. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// The custom key store that you delete cannot contain any [KMS keys]. Before deleting the +// key store, verify that you will never need to use any of the KMS keys in the key +// store for any [cryptographic operations]. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the +// required waiting period expires and all KMS keys are deleted from the custom key +// store, use DisconnectCustomKeyStoreto disconnect the key store from KMS. Then, you can delete the +// custom key store. +// +// For keys in an CloudHSM key store, the ScheduleKeyDeletion operation makes a +// best effort to delete the key material from the associated cluster. However, you +// might need to manually [delete the orphaned key material]from the cluster and its backups. KMS never creates, +// manages, or deletes cryptographic keys in the external key manager associated +// with an external key store. You must manage them using your external key manager +// tools. +// +// Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to +// disconnect the custom key store from its backing key store. While the key store +// is disconnected, you cannot create or use the KMS keys in the key store. But, +// you do not need to delete KMS keys and you can reconnect a disconnected custom +// key store at any time. +// +// If the operation succeeds, it returns a JSON object with no properties. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:DeleteCustomKeyStore] (IAM policy) +// +// Related operations: +// +// # ConnectCustomKeyStore +// +// # CreateCustomKeyStore +// +// # DescribeCustomKeyStores +// +// # DisconnectCustomKeyStore +// +// # UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
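As a rough sketch of the disconnect-then-delete sequence this documentation describes, assuming the key store is already empty of KMS keys; the package and function are hypothetical.

```go
// Hypothetical helper: disconnect a custom key store, then delete it.
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// removeKeyStore assumes every KMS key in the store has already been scheduled
// for deletion and removed.
func removeKeyStore(ctx context.Context, client *kms.Client, storeID string) error {
	if _, err := client.DisconnectCustomKeyStore(ctx, &kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(storeID),
	}); err != nil {
		return err
	}
	_, err := client.DeleteCustomKeyStore(ctx, &kms.DeleteCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(storeID),
	})
	return err
}
```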
+// +// [delete the orphaned key material]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [kms:DeleteCustomKeyStore]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) { + if params == nil { + params = &DeleteCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCustomKeyStore", params, optFns, c.addOperationDeleteCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteCustomKeyStoreInput struct { + + // Enter the ID of the custom key store you want to delete. To find the ID of a + // custom key store, use the DescribeCustomKeyStoresoperation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type DeleteCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteCustomKeyStore"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if 
err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go new file mode 100644 index 000000000..c952f6305 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes key material that was previously imported. This operation makes the +// specified KMS key temporarily unusable. To restore the usability of the KMS key, +// reimport the same key material. For more information about importing key +// material into KMS, see [Importing Key Material]in the Key Management Service Developer Guide. +// +// When the specified KMS key is in the PendingDeletion state, this operation does +// not change the KMS key's state. Otherwise, it changes the KMS key's state to +// PendingImport . +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:DeleteImportedKeyMaterial] (key policy) +// +// Related operations: +// +// # GetParametersForImport +// +// # ImportKeyMaterial +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
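A one-call sketch of the DeleteImportedKeyMaterial operation documented above; the helper name and key identifier are hypothetical.

```go
// Hypothetical helper around the generated DeleteImportedKeyMaterial operation.
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// deleteImportedMaterial removes previously imported key material, leaving the
// key unusable (PendingImport) until the same material is reimported.
func deleteImportedMaterial(ctx context.Context, client *kms.Client, keyID string) error {
	_, err := client.DeleteImportedKeyMaterial(ctx, &kms.DeleteImportedKeyMaterialInput{
		KeyId: aws.String(keyID), // key ID or key ARN; Origin must be EXTERNAL
	})
	return err
}
```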
+// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:DeleteImportedKeyMaterial]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Importing Key Material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) { + if params == nil { + params = &DeleteImportedKeyMaterialInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteImportedKeyMaterial", params, optFns, c.addOperationDeleteImportedKeyMaterialMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteImportedKeyMaterialOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteImportedKeyMaterialInput struct { + + // Identifies the KMS key from which you are deleting imported key material. The + // Origin of the KMS key must be EXTERNAL . + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DeleteImportedKeyMaterialOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteImportedKeyMaterial{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteImportedKeyMaterial{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteImportedKeyMaterial"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeleteImportedKeyMaterialValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteImportedKeyMaterial", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go new file mode 100644 index 000000000..fef5dc426 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeriveSharedSecret.go @@ -0,0 +1,364 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Derives a shared secret using a key agreement algorithm. +// +// You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China +// Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call +// DeriveSharedSecret. +// +// DeriveSharedSecret uses the [Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive] (ECDH) to establish a key agreement between two +// peers by deriving a shared secret from their elliptic curve public-private key +// pairs. You can use the raw shared secret that DeriveSharedSecret returns to +// derive a symmetric key that can encrypt and decrypt data that is sent between +// the two peers, or that can generate and verify HMACs. KMS recommends that you +// follow [NIST recommendations for key derivation]when using the raw shared secret to derive a symmetric key. +// +// The following workflow demonstrates how to establish key agreement over an +// insecure communication channel using DeriveSharedSecret. +// +// - Alice calls CreateKeyto create an asymmetric KMS key pair with a KeyUsage value of +// KEY_AGREEMENT . +// +// The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 +// +// (China Regions only) key spec. +// +// - Bob creates an elliptic curve key pair. +// +// Bob can call CreateKeyto create an asymmetric KMS key pair or generate a key pair +// +// outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve +// (ECC) or SM2 (China Regions ony) curve as Alice. 
+// +// - Alice and Bob exchange their public keys through an insecure communication +// channel (like the internet). +// +// Use GetPublicKeyto download the public key of your asymmetric KMS key pair. +// +// KMS strongly recommends verifying that the public key you receive came from the +// +// expected party before using it to derive a shared secret. +// +// - Alice calls DeriveSharedSecret. +// +// KMS uses the private key from the KMS key pair generated in Step 1, Bob's +// +// public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman +// Primitive to derive the shared secret. The private key in your KMS key pair +// never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret. +// +// - Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive +// to calculate the same raw secret using his private key and Alice's public key. +// +// To derive a shared secret you must provide a key agreement algorithm, the +// private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 +// (China Regions only) KMS key pair, and the public key from your peer's +// NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public +// key can be from another asymmetric KMS key pair or from a key pair generated +// outside of KMS, but both key pairs must be on the same elliptic curve. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:DeriveSharedSecret] (key policy) +// +// Related operations: +// +// # CreateKey +// +// # GetPublicKey +// +// # DescribeKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:DeriveSharedSecret]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar3.pdf#page=60 +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [NIST recommendations for key derivation]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Cr2.pdf +func (c *Client) DeriveSharedSecret(ctx context.Context, params *DeriveSharedSecretInput, optFns ...func(*Options)) (*DeriveSharedSecretOutput, error) { + if params == nil { + params = &DeriveSharedSecretInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeriveSharedSecret", params, optFns, c.addOperationDeriveSharedSecretMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeriveSharedSecretOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeriveSharedSecretInput struct { + + // Specifies the key agreement algorithm used to derive the shared secret. The + // only valid value is ECDH . + // + // This member is required. + KeyAgreementAlgorithm types.KeyAgreementAlgorithmSpec + + // Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only) KMS + // key. 
KMS uses the private key in the specified key pair to derive the shared + // secret. The key usage of the KMS key must be KEY_AGREEMENT . To find the + // KeyUsage of a KMS key, use the DescribeKey operation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the public key in your peer's NIST-recommended elliptic curve (ECC) + // or SM2 (China Regions only) key pair. + // + // The public key must be a DER-encoded X.509 public key, also known as + // SubjectPublicKeyInfo (SPKI), as defined in [RFC 5280]. + // + // GetPublicKeyreturns the public key of an asymmetric KMS key pair in the required + // DER-encoded format. + // + // If you use [Amazon Web Services CLI version 1], you must provide the DER-encoded X.509 public key in a file. + // Otherwise, the Amazon Web Services CLI Base64-encodes the public key a second + // time, resulting in a ValidationException . + // + // You can specify the public key as binary data in a file using fileb ( fileb:// ) + // or in-line using a Base64 encoded string. + // + // [RFC 5280]: https://tools.ietf.org/html/rfc5280 + // [Amazon Web Services CLI version 1]: https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-welcome.html + // + // This member is required. + PublicKey []byte + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // A signed [attestation document] from an Amazon Web Services Nitro enclave and the encryption + // algorithm to use with the enclave's public key. The only valid encryption + // algorithm is RSAES_OAEP_SHA_256 . + // + // This parameter only supports attestation documents for Amazon Web Services + // Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro + // Enclaves, use the [Amazon Web Services Nitro Enclaves SDK]to generate the attestation document and then use the + // Recipient parameter from any Amazon Web Services SDK to provide the attestation + // document for the enclave. 
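A minimal sketch of the caller's side of the ECDH exchange described above: our KEY_AGREEMENT key plus the peer's DER-encoded (SPKI) public key. The package and function are hypothetical; the algorithm value is written as a string conversion because ECDH is the only documented value.

```go
// Hypothetical helper around the generated DeriveSharedSecret operation.
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// deriveSecret returns the raw shared secret; deriving an actual symmetric key
// from it is left to a KDF, per the NIST guidance referenced above.
func deriveSecret(ctx context.Context, client *kms.Client, keyID string, peerPublicKey []byte) ([]byte, error) {
	out, err := client.DeriveSharedSecret(ctx, &kms.DeriveSharedSecretInput{
		KeyId:                 aws.String(keyID),
		KeyAgreementAlgorithm: types.KeyAgreementAlgorithmSpec("ECDH"), // only valid value
		PublicKey:             peerPublicKey, // DER-encoded X.509 SubjectPublicKeyInfo
	})
	if err != nil {
		return nil, err
	}
	return out.SharedSecret, nil
}
```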
+ // + // When you use this parameter, instead of returning a plaintext copy of the + // shared secret, KMS encrypts the plaintext shared secret under the public key in + // the attestation document, and returns the resulting ciphertext in the + // CiphertextForRecipient field in the response. This ciphertext can be decrypted + // only with the private key in the enclave. The CiphertextBlob field in the + // response contains the encrypted shared secret derived from the KMS key specified + // by the KeyId parameter and public key specified by the PublicKey parameter. The + // SharedSecret field in the response is null or empty. + // + // For information about the interaction between KMS and Amazon Web Services Nitro + // Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [attestation document]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + // [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk + Recipient *types.RecipientInfo + + noSmithyDocumentSerde +} + +type DeriveSharedSecretOutput struct { + + // The plaintext shared secret encrypted with the public key in the attestation + // document. + // + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + CiphertextForRecipient []byte + + // Identifies the key agreement algorithm used to derive the shared secret. + KeyAgreementAlgorithm types.KeyAgreementAlgorithmSpec + + // Identifies the KMS key used to derive the shared secret. + KeyId *string + + // The source of the key material for the specified KMS key. + // + // When this value is AWS_KMS , KMS created the key material. When this value is + // EXTERNAL , the key material was imported or the KMS key doesn't have any key + // material. + // + // The only valid values for DeriveSharedSecret are AWS_KMS and EXTERNAL . + // DeriveSharedSecret does not support KMS keys with a KeyOrigin value of + // AWS_CLOUDHSM or EXTERNAL_KEY_STORE . + KeyOrigin types.OriginType + + // The raw secret derived from the specified key agreement algorithm, private key + // in the asymmetric KMS key, and your peer's public key. + // + // If the response includes the CiphertextForRecipient field, the SharedSecret + // field is null or empty. + SharedSecret []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeriveSharedSecretMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeriveSharedSecret{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeriveSharedSecret{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeriveSharedSecret"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDeriveSharedSecretValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeriveSharedSecret(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeriveSharedSecret(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeriveSharedSecret", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go new file mode 100644 index 000000000..438537acd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go @@ -0,0 +1,338 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets information about [custom key stores] in the account and Region. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// By default, this operation returns information about all custom key stores in +// the account and Region. To get only information about a particular custom key +// store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not +// both). +// +// To determine whether the custom key store is connected to its CloudHSM cluster +// or external key store proxy, use the ConnectionState element in the response. +// If an attempt to connect the custom key store failed, the ConnectionState value +// is FAILED and the ConnectionErrorCode element in the response indicates the +// cause of the failure. For help interpreting the ConnectionErrorCode , see CustomKeyStoresListEntry. +// +// Custom key stores have a DISCONNECTED connection state if the key store has +// never been connected or you used the DisconnectCustomKeyStoreoperation to disconnect it. Otherwise, the +// connection state is CONNECTED. If your custom key store connection state is +// CONNECTED but you are having trouble using it, verify that the backing store is +// active and available. For an CloudHSM key store, verify that the associated +// CloudHSM cluster is active and contains the minimum number of HSMs required for +// the operation, if any. For an external key store, verify that the external key +// store proxy and its associated external key manager are reachable and enabled. +// +// For help repairing your CloudHSM key store, see the [Troubleshooting CloudHSM key stores]. For help repairing your +// external key store, see the [Troubleshooting external key stores]. Both topics are in the Key Management Service +// Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:DescribeCustomKeyStores] (IAM policy) +// +// Related operations: +// +// # ConnectCustomKeyStore +// +// # CreateCustomKeyStore +// +// # DeleteCustomKeyStore +// +// # DisconnectCustomKeyStore +// +// # UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [kms:DescribeCustomKeyStores]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [Troubleshooting CloudHSM key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html +// [Troubleshooting external key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCustomKeyStores", params, optFns, c.addOperationDescribeCustomKeyStoresMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCustomKeyStoresOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCustomKeyStoresInput struct { + + // Gets only information about the specified custom key store. Enter the key store + // ID. + // + // By default, this operation gets information about all custom key stores in the + // account and Region. To limit the output to a particular custom key store, + // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not + // both. + CustomKeyStoreId *string + + // Gets only information about the specified custom key store. Enter the friendly + // name of the custom key store. + // + // By default, this operation gets information about all custom key stores in the + // account and Region. To limit the output to a particular custom key store, + // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not + // both. + CustomKeyStoreName *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type DescribeCustomKeyStoresOutput struct { + + // Contains metadata about each custom key store. + CustomKeyStores []types.CustomKeyStoresListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeCustomKeyStores{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeCustomKeyStores{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeCustomKeyStores"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomKeyStores(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeCustomKeyStoresPaginatorOptions is the paginator options for +// DescribeCustomKeyStores +type DescribeCustomKeyStoresPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeCustomKeyStoresPaginator is a paginator for DescribeCustomKeyStores +type DescribeCustomKeyStoresPaginator struct { + options DescribeCustomKeyStoresPaginatorOptions + client DescribeCustomKeyStoresAPIClient + params *DescribeCustomKeyStoresInput + nextToken *string + firstPage bool +} + +// NewDescribeCustomKeyStoresPaginator returns a new +// DescribeCustomKeyStoresPaginator +func NewDescribeCustomKeyStoresPaginator(client DescribeCustomKeyStoresAPIClient, params *DescribeCustomKeyStoresInput, optFns ...func(*DescribeCustomKeyStoresPaginatorOptions)) *DescribeCustomKeyStoresPaginator { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + options := DescribeCustomKeyStoresPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeCustomKeyStoresPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeCustomKeyStoresPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeCustomKeyStores page. +func (p *DescribeCustomKeyStoresPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeCustomKeyStores(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeCustomKeyStoresAPIClient is a client that implements the +// DescribeCustomKeyStores operation. +type DescribeCustomKeyStoresAPIClient interface { + DescribeCustomKeyStores(context.Context, *DescribeCustomKeyStoresInput, ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) +} + +var _ DescribeCustomKeyStoresAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeCustomKeyStores(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeCustomKeyStores", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go new file mode 100644 index 000000000..3ac7fafa0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -0,0 +1,257 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Provides detailed information about a KMS key. You can run DescribeKey on a [customer managed key] or +// an [Amazon Web Services managed key]. 
+// +// This detailed information includes the key ARN, creation date (and deletion +// date, if applicable), the key state, and the origin and expiration date (if any) +// of the key material. It includes fields, like KeySpec , that help you +// distinguish different types of KMS keys. It also displays the key usage +// (encryption, signing, or generating and verifying MACs) and the algorithms that +// the KMS key supports. +// +// For [multi-Region keys], DescribeKey displays the primary key and all related replica keys. For +// KMS keys in [CloudHSM key stores], it includes information about the key store, such as the key +// store ID and the CloudHSM cluster ID. For KMS keys in [external key stores], it includes the custom +// key store ID and the ID of the external key. +// +// DescribeKey does not return the following information: +// +// - Aliases associated with the KMS key. To get this information, use ListAliases. +// +// - Whether automatic key rotation is enabled on the KMS key. To get this +// information, use GetKeyRotationStatus. Also, some key states prevent a KMS key from being +// automatically rotated. For details, see [How Automatic Key Rotation Works]in the Key Management Service +// Developer Guide. +// +// - Tags on the KMS key. To get this information, use ListResourceTags. +// +// - Key policies and grants on the KMS key. To get this information, use GetKeyPolicyand ListGrants. +// +// In general, DescribeKey is a non-mutating operation. It returns data about KMS +// keys, but doesn't change them. However, Amazon Web Services services use +// DescribeKey to create [Amazon Web Services managed keys] from a predefined Amazon Web Services alias with no key +// ID. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:DescribeKey] (key policy) +// +// Related operations: +// +// # GetKeyPolicy +// +// # GetKeyRotationStatus +// +// # ListAliases +// +// # ListGrants +// +// # ListKeys +// +// # ListResourceTags +// +// # ListRetirableGrants +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [CloudHSM key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html +// [external key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [kms:DescribeKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [How Automatic Key Rotation Works]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works +// [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html +// [Amazon Web Services managed keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) { + if params == nil { + params = &DescribeKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeKey", params, optFns, c.addOperationDescribeKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeKeyInput struct { + + // Describes the specified KMS key. + // + // If you specify a predefined Amazon Web Services alias (an Amazon Web Services + // alias with no key ID), KMS associates the alias with an [Amazon Web Services managed key]and returns its KeyId + // and Arn in the response. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html##aws-managed-cmk + // + // This member is required. + KeyId *string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type DescribeKeyOutput struct { + + // Metadata associated with the key. + KeyMetadata *types.KeyMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDescribeKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go new file mode 100644 index 000000000..3d247346d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the state of a KMS key to disabled. This change temporarily prevents use +// of the KMS key for [cryptographic operations]. +// +// For more information about how key state affects the use of a KMS key, see [Key states of KMS keys] in +// the Key Management Service Developer Guide . +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:DisableKey] (key policy) +// +// Related operations: EnableKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [kms:DisableKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) { + if params == nil { + params = &DisableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKey", params, optFns, c.addOperationDisableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyInput struct { + + // Identifies the KMS key to disable. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DisableKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDisableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DisableKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go new file mode 100644 index 000000000..30ee5155f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -0,0 +1,215 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Disables [automatic rotation of the key material] of the specified symmetric encryption KMS key. +// +// Automatic key rotation is supported only on symmetric encryption KMS keys. You +// cannot enable automatic rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To +// enable or disable automatic rotation of a set of related [multi-Region keys], set the property on +// the primary key. +// +// You can enable (EnableKeyRotation ) and disable automatic rotation of the key material in [customer managed KMS keys]. Key +// material rotation of [Amazon Web Services managed KMS keys]is not configurable. KMS always rotates the key material +// for every year. Rotation of [Amazon Web Services owned KMS keys]varies. +// +// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed +// keys from every three years to every year. For details, see EnableKeyRotation. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:DisableKeyRotation] (key policy) +// +// Related operations: +// +// # EnableKeyRotation +// +// # GetKeyRotationStatus +// +// # ListKeyRotations +// +// # RotateKeyOnDemand +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +// [Amazon Web Services managed KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [automatic rotation of the key material]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html +// [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [customer managed KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon Web Services owned KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk +// [kms:DisableKeyRotation]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) { + if params == nil { + params = &DisableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKeyRotation", params, optFns, c.addOperationDisableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable or disable + // automatic rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + // [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + // [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DisableKeyRotation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDisableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DisableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go new file mode 100644 index 000000000..55327cb72 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -0,0 +1,200 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Disconnects the [custom key store] from its backing key store. This operation disconnects an +// CloudHSM key store from its associated CloudHSM cluster or disconnects an +// external key store from the external key store proxy that communicates with your +// external key manager. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// While a custom key store is disconnected, you can manage the custom key store +// and its KMS keys, but you cannot create or use its KMS keys. You can reconnect +// the custom key store at any time. +// +// While a custom key store is disconnected, all attempts to create KMS keys in +// the custom key store or to use existing KMS keys in [cryptographic operations]will fail. This action can +// prevent users from storing and accessing sensitive data. +// +// When you disconnect a custom key store, its ConnectionState changes to +// Disconnected . To find the connection state of a custom key store, use the DescribeCustomKeyStores +// operation. To reconnect a custom key store, use the ConnectCustomKeyStoreoperation. +// +// If the operation succeeds, it returns a JSON object with no properties. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:DisconnectCustomKeyStore] (IAM policy) +// +// Related operations: +// +// # ConnectCustomKeyStore +// +// # CreateCustomKeyStore +// +// # DeleteCustomKeyStore +// +// # DescribeCustomKeyStores +// +// # UpdateCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [kms:DisconnectCustomKeyStore]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) { + if params == nil { + params = &DisconnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisconnectCustomKeyStore", params, optFns, c.addOperationDisconnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisconnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisconnectCustomKeyStoreInput struct { + + // Enter the ID of the custom key store you want to disconnect. To find the ID of + // a custom key store, use the DescribeCustomKeyStoresoperation. + // + // This member is required. 
+ CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type DisconnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DisconnectCustomKeyStore"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpDisconnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisconnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisconnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DisconnectCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go new file mode 100644 
index 000000000..fc8402f5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -0,0 +1,182 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the key state of a KMS key to enabled. This allows you to use the KMS key +// for [cryptographic operations]. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:EnableKey] (key policy) +// +// Related operations: DisableKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:EnableKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) { + if params == nil { + params = &EnableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKey", params, optFns, c.addOperationEnableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyInput struct { + + // Identifies the KMS key to enable. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type EnableKeyOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "EnableKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpEnableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "EnableKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go new file mode 100644 index 000000000..3236e4970 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -0,0 +1,254 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Enables [automatic rotation of the key material] of the specified symmetric encryption KMS key. +// +// By default, when you enable automatic rotation of a [customer managed KMS key], KMS rotates the key +// material of the KMS key one year (approximately 365 days) from the enable date +// and every year thereafter. You can use the optional RotationPeriodInDays +// parameter to specify a custom rotation period when you enable key rotation, or +// you can use RotationPeriodInDays to modify the rotation period of a key that +// you previously enabled automatic key rotation on. +// +// You can monitor rotation of the key material for your KMS keys in CloudTrail +// and Amazon CloudWatch. To disable rotation of the key material in a customer +// managed KMS key, use the DisableKeyRotationoperation. You can use the GetKeyRotationStatus operation to identify any +// in progress rotations. You can use the ListKeyRotationsoperation to view the details of +// completed rotations. +// +// Automatic key rotation is supported only on [symmetric encryption KMS keys]. You cannot enable automatic +// rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To enable or disable +// automatic rotation of a set of related [multi-Region keys], set the property on the primary key. +// +// You cannot enable or disable automatic rotation of [Amazon Web Services managed KMS keys]. KMS always rotates the key +// material of Amazon Web Services managed keys every year. Rotation of [Amazon Web Services owned KMS keys]is managed +// by the Amazon Web Services service that owns the key. +// +// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed +// keys from every three years (approximately 1,095 days) to every year +// (approximately 365 days). +// +// New Amazon Web Services managed keys are automatically rotated one year after +// they are created, and approximately every year thereafter. +// +// Existing Amazon Web Services managed keys are automatically rotated one year +// after their most recent rotation, and every year thereafter. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:EnableKeyRotation] (key policy) +// +// Related operations: +// +// # DisableKeyRotation +// +// # GetKeyRotationStatus +// +// # ListKeyRotations +// +// RotateKeyOnDemand +// +// - You can perform on-demand (RotateKeyOnDemand ) rotation of the key material in customer +// managed KMS keys, regardless of whether or not automatic key rotation is +// enabled. +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [kms:EnableKeyRotation]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Amazon Web Services owned KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk +// [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +// [Amazon Web Services managed KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [customer managed KMS key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [automatic rotation of the key material]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable +// [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [symmetric encryption KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) { + if params == nil { + params = &EnableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKeyRotation", params, optFns, c.addOperationEnableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation + // of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To enable or disable automatic + // rotation of a set of related [multi-Region keys], set the property on the primary key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + // [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + // [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + // [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + // + // This member is required. + KeyId *string + + // Use this parameter to specify a custom period of time between each rotation + // date. If no value is specified, the default value is 365 days. 
+ // + // The rotation period defines the number of days after you enable automatic key + // rotation that KMS will rotate your key material, and the number of days between + // each automatic rotation thereafter. + // + // You can use the [kms:RotationPeriodInDays]kms:RotationPeriodInDays condition key to further constrain the + // values that principals can specify in the RotationPeriodInDays parameter. + // + // [kms:RotationPeriodInDays]: https://docs.aws.amazon.com/kms/latest/developerguide/conditions-kms.html#conditions-kms-rotation-period-in-days + RotationPeriodInDays *int32 + + noSmithyDocumentSerde +} + +type EnableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "EnableKeyRotation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpEnableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "EnableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go new file mode 100644 index 000000000..ac6031bdf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -0,0 +1,322 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a +// symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT . +// +// You can use this operation to encrypt small amounts of arbitrary data, such as +// a personal identifier or database password, or other sensitive information. You +// don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair +// operations return a plaintext data key and an encrypted copy of that data key. +// +// If you use a symmetric encryption KMS key, you can use an encryption context to +// add additional security to your encryption operation. If you specify an +// EncryptionContext when encrypting data, you must specify the same encryption +// context (a case-sensitive exact match) when decrypting the data. Otherwise, the +// request to decrypt fails with an InvalidCiphertextException . For more +// information, see [Encryption Context]in the Key Management Service Developer Guide. +// +// If you specify an asymmetric KMS key, you must also specify the encryption +// algorithm. The algorithm must be compatible with the KMS key spec. +// +// When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to +// record the KMS key and encryption algorithm that you choose. You will be +// required to provide the same KMS key and encryption algorithm when you decrypt +// the data. If the KMS key and algorithm do not match the values used to encrypt +// the data, the decrypt operation fails. +// +// You are not required to supply the key ID and encryption algorithm when you +// decrypt with symmetric encryption KMS keys because KMS stores this information +// in the ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. +// +// The maximum size of the data that you can encrypt varies with the type of KMS +// key and the encryption algorithm that you choose. +// +// - Symmetric encryption KMS keys +// +// - SYMMETRIC_DEFAULT : 4096 bytes +// +// - RSA_2048 +// +// - RSAES_OAEP_SHA_1 : 214 bytes +// +// - RSAES_OAEP_SHA_256 : 190 bytes +// +// - RSA_3072 +// +// - RSAES_OAEP_SHA_1 : 342 bytes +// +// - RSAES_OAEP_SHA_256 : 318 bytes +// +// - RSA_4096 +// +// - RSAES_OAEP_SHA_1 : 470 bytes +// +// - RSAES_OAEP_SHA_256 : 446 bytes +// +// - SM2PKE : 1024 bytes (China Regions only) +// +// The KMS key that you use for this operation must be in a compatible key state. 
+// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:Encrypt] (key policy) +// +// Related operations: +// +// # Decrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Encryption Context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [kms:Encrypt]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) { + if params == nil { + params = &EncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Encrypt", params, optFns, c.addOperationEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EncryptInput struct { + + // Identifies the KMS key to use in the encryption operation. The KMS key must + // have a KeyUsage of ENCRYPT_DECRYPT . To find the KeyUsage of a KMS key, use the DescribeKey + // operation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Data to be encrypted. + // + // This member is required. + Plaintext []byte + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption algorithm that KMS will use to encrypt the plaintext + // message. The algorithm must be compatible with the KMS key that you specify. + // + // This parameter is required only for asymmetric KMS keys. The default value, + // SYMMETRIC_DEFAULT , is the algorithm used for symmetric encryption KMS keys. If + // you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. + // + // The SM2PKE algorithm is only available in China Regions. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context that will be used to encrypt the data. 
An + // encryption context is valid only for [cryptographic operations]with a symmetric encryption KMS key. The + // standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do + // not support an encryption context. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type EncryptOutput struct { + + // The encrypted plaintext. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to encrypt the plaintext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name ([key ARN] ) of the KMS key that was used to encrypt the + // plaintext. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Encrypt"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Encrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go new file mode 100644 index 000000000..5cab73d3f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -0,0 +1,377 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
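As a rough usage sketch for the Encrypt operation vendored above: the snippet below is a minimal call with the v2 client, assuming placeholder values (the alias, encryption context, and plaintext are invented for illustration; only the package paths and call signatures come from the SDK itself, and nothing here is part of this patch).

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Encrypt up to 4,096 bytes with a symmetric encryption KMS key. The
	// encryption context is optional, but the exact same map must be supplied
	// to Decrypt, or decryption fails with InvalidCiphertextException.
	out, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:             aws.String("alias/example"), // placeholder alias
		Plaintext:         []byte("example database password"),
		EncryptionContext: map[string]string{"purpose": "example"},
		// For an asymmetric KMS key, EncryptionAlgorithm must also be set,
		// e.g. types.EncryptionAlgorithmSpecRsaesOaepSha256.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ciphertext: %d bytes\n", len(out.CiphertextBlob))
}
```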
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a plaintext copy of the data key and a copy that is encrypted under a +// symmetric encryption KMS key that you specify. The bytes in the plaintext key +// are random; they are not related to the caller or the KMS key. You can use the +// plaintext key to encrypt your data outside of KMS and store the encrypted data +// key with the encrypted data. +// +// To generate a data key, specify the symmetric encryption KMS key that will be +// used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt +// data keys. To get the type of your KMS key, use the DescribeKeyoperation. +// +// You must also specify the length of the data key. Use either the KeySpec or +// NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use +// the KeySpec parameter. +// +// To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec +// value of AES_128 or a NumberOfBytes value of 16 . The symmetric encryption key +// used in China Regions to encrypt your data key is an SM4 encryption key. +// +// To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric +// data key pair, use the GenerateDataKeyPairor GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random +// byte string, use GenerateRandom. +// +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext , you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException . For more information, see [Encryption Context] in the Key Management +// Service Developer Guide. +// +// GenerateDataKey also supports [Amazon Web Services Nitro Enclaves], which provide an isolated compute environment +// in Amazon EC2. To call GenerateDataKey for an Amazon Web Services Nitro +// enclave, use the [Amazon Web Services Nitro Enclaves SDK]or any Amazon Web Services SDK. Use the Recipient parameter to +// provide the attestation document for the enclave. GenerateDataKey returns a +// copy of the data key encrypted under the specified KMS key, as usual. But +// instead of a plaintext copy of the data key, the response includes a copy of the +// data key encrypted under the public key from the attestation document ( +// CiphertextForRecipient ). For information about the interaction between KMS and +// Amazon Web Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer +// Guide.. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// # How to use your data key +// +// We recommend that you use the following pattern to encrypt data locally in your +// application. You can write your own code or use a client-side encryption +// library, such as the [Amazon Web Services Encryption SDK], the [Amazon DynamoDB Encryption Client], or [Amazon S3 client-side encryption] to do these tasks for you. 
+// +// To encrypt data outside of KMS: +// +// - Use the GenerateDataKey operation to get a data key. +// +// - Use the plaintext data key (in the Plaintext field of the response) to +// encrypt your data outside of KMS. Then erase the plaintext data key from memory. +// +// - Store the encrypted data key (in the CiphertextBlob field of the response) +// with the encrypted data. +// +// To decrypt data outside of KMS: +// +// - Use the Decryptoperation to decrypt the encrypted data key. The operation returns +// a plaintext copy of the data key. +// +// - Use the plaintext data key to decrypt data outside of KMS, then erase the +// plaintext data key from memory. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:GenerateDataKey] (key policy) +// +// Related operations: +// +// # Decrypt +// +// # Encrypt +// +// # GenerateDataKeyPair +// +// # GenerateDataKeyPairWithoutPlaintext +// +// # GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Amazon Web Services Encryption SDK]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/ +// [Amazon DynamoDB Encryption Client]: https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/ +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Encryption Context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [Amazon Web Services Nitro Enclaves]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html +// [Amazon S3 client-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html +// [kms:GenerateDataKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk +func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { + if params == nil { + params = &GenerateDataKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKey", params, optFns, c.addOperationGenerateDataKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKeyoperation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption context that will be used when encrypting the data key. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // Specifies the length of the data key. Use AES_128 to generate a 128-bit + // symmetric key, or AES_256 to generate a 256-bit symmetric key. + // + // You must specify either the KeySpec or the NumberOfBytes parameter (but not + // both) in every GenerateDataKey request. + KeySpec types.DataKeySpec + + // Specifies the length of the data key in bytes. For example, use the value 64 to + // generate a 512-bit data key (64 bytes is 512 bits). For 128-bit (16-byte) and + // 256-bit (32-byte) data keys, use the KeySpec parameter. + // + // You must specify either the KeySpec or the NumberOfBytes parameter (but not + // both) in every GenerateDataKey request. + NumberOfBytes *int32 + + // A signed [attestation document] from an Amazon Web Services Nitro enclave and the encryption + // algorithm to use with the enclave's public key. The only valid encryption + // algorithm is RSAES_OAEP_SHA_256 . + // + // This parameter only supports attestation documents for Amazon Web Services + // Nitro Enclaves. To include this parameter, use the [Amazon Web Services Nitro Enclaves SDK]or any Amazon Web Services + // SDK. 
+ // + // When you use this parameter, instead of returning the plaintext data key, KMS + // encrypts the plaintext data key under the public key in the attestation + // document, and returns the resulting ciphertext in the CiphertextForRecipient + // field in the response. This ciphertext can be decrypted only with the private + // key in the enclave. The CiphertextBlob field in the response contains a copy of + // the data key encrypted under the KMS key specified by the KeyId parameter. The + // Plaintext field in the response is null or empty. + // + // For information about the interaction between KMS and Amazon Web Services Nitro + // Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [attestation document]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + // [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk + Recipient *types.RecipientInfo + + noSmithyDocumentSerde +} + +type GenerateDataKeyOutput struct { + + // The encrypted copy of the data key. When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The plaintext data key encrypted with the public key from the Nitro enclave. + // This ciphertext can be decrypted only by using a private key in the Nitro + // enclave. + // + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + CiphertextForRecipient []byte + + // The Amazon Resource Name ([key ARN] ) of the KMS key that encrypted the data key. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The plaintext data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. Use this + // data key to encrypt your data outside of KMS. Then, remove it from memory as + // soon as possible. + // + // If the response includes the CiphertextForRecipient field, the Plaintext field + // is null or empty. + Plaintext []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateDataKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGenerateDataKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateDataKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go new file mode 100644 index 000000000..394bac814 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -0,0 +1,375 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
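The "how to use your data key" steps documented in GenerateDataKey above translate roughly into the following envelope-encryption sketch: generate a data key, seal the payload locally with AES-GCM, zero the plaintext key, and keep only the encrypted key next to the ciphertext. The package name, key ID, and function name are assumptions made for the example; only the SDK and standard-library calls are real.

```
package envelope

import (
	"context"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// Seal encrypts payload outside of KMS with a fresh 256-bit data key and
// returns the encrypted data key, the GCM nonce, and the sealed payload.
func Seal(ctx context.Context, client *kms.Client, keyID string, payload []byte) (encKey, nonce, sealed []byte, err error) {
	dk, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
		KeyId:   aws.String(keyID),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, nil, nil, err
	}
	// Erase the plaintext data key once local encryption is done.
	defer func() {
		for i := range dk.Plaintext {
			dk.Plaintext[i] = 0
		}
	}()

	block, err := aes.NewCipher(dk.Plaintext)
	if err != nil {
		return nil, nil, nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, nil, err
	}
	nonce = make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, nil, nil, err
	}
	sealed = gcm.Seal(nil, nonce, payload, nil)
	return dk.CiphertextBlob, nonce, sealed, nil
}
```

The decrypt path mirrors this: call Decrypt on the stored encrypted data key, open the GCM ciphertext with it, and zero the recovered key as soon as possible.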
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This +// operation returns a plaintext public key, a plaintext private key, and a copy of +// the private key that is encrypted under the symmetric encryption KMS key you +// specify. You can use the data key pair to perform asymmetric cryptography and +// implement digital signatures outside of KMS. The bytes in the keys are random; +// they are not related to the caller or to the KMS key that is used to encrypt the +// private key. +// +// You can use the public key that GenerateDataKeyPair returns to encrypt data or +// verify a signature outside of KMS. Then, store the encrypted private key with +// the data. When you are ready to decrypt data or sign a message, you can use the Decrypt +// operation to decrypt the encrypted private key. +// +// To generate a data key pair, you must specify a symmetric encryption KMS key to +// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key +// or a KMS key in a custom key store. To get the type and origin of your KMS key, +// use the DescribeKeyoperation. +// +// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key +// pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends +// that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either +// encryption or signing, but not both. However, KMS cannot enforce any +// restrictions on the use of data key pairs outside of KMS. +// +// If you are using the data key pair to encrypt data, or for any operation where +// you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintextoperation. +// GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an +// encrypted private key, but omits the plaintext private key that you need only to +// decrypt ciphertext or sign a message. Later, when you need to decrypt the data +// or sign a message, use the Decryptoperation to decrypt the encrypted private key in +// the data key pair. +// +// GenerateDataKeyPair returns a unique data key pair for each request. The bytes +// in the keys are random; they are not related to the caller or the KMS key that +// is used to encrypt the private key. The public key is a DER-encoded X.509 +// SubjectPublicKeyInfo, as specified in [RFC 5280]. The private key is a DER-encoded PKCS8 +// PrivateKeyInfo, as specified in [RFC 5958]. +// +// GenerateDataKeyPair also supports [Amazon Web Services Nitro Enclaves], which provide an isolated compute +// environment in Amazon EC2. To call GenerateDataKeyPair for an Amazon Web +// Services Nitro enclave, use the [Amazon Web Services Nitro Enclaves SDK]or any Amazon Web Services SDK. Use the +// Recipient parameter to provide the attestation document for the enclave. +// GenerateDataKeyPair returns the public data key and a copy of the private data +// key encrypted under the specified KMS key, as usual. But instead of a plaintext +// copy of the private data key ( PrivateKeyPlaintext ), the response includes a +// copy of the private data key encrypted under the public key from the attestation +// document ( CiphertextForRecipient ). 
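Because the comment above says the private key comes back as DER-encoded PKCS #8 (RFC 5958) and the public key as a SubjectPublicKeyInfo (RFC 5280), a plausible local signing flow with the standard library is sketched below; the alias, key spec choice, and message are placeholders, not anything taken from this patch.

```
package main

import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Request an ECC P-256 data key pair; the symmetric KMS key only protects
	// the encrypted copy of the private key.
	out, err := client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/example"), // placeholder symmetric encryption key
		KeyPairSpec: types.DataKeyPairSpecEccNistP256,
	})
	if err != nil {
		log.Fatal(err)
	}

	// PrivateKeyPlaintext is PKCS #8 DER; parse it and sign locally.
	parsed, err := x509.ParsePKCS8PrivateKey(out.PrivateKeyPlaintext)
	if err != nil {
		log.Fatal(err)
	}
	priv := parsed.(*ecdsa.PrivateKey)
	digest := sha256.Sum256([]byte("message to sign"))
	sig, err := ecdsa.SignASN1(rand.Reader, priv, digest[:])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature: %d bytes; keep PrivateKeyCiphertextBlob (%d bytes) to recover the key later\n",
		len(sig), len(out.PrivateKeyCiphertextBlob))
}
```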
For information about the interaction +// between KMS and Amazon Web Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management +// Service Developer Guide.. +// +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext , you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException . For more information, see [Encryption Context] in the Key Management +// Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:GenerateDataKeyPair] (key policy) +// +// Related operations: +// +// # Decrypt +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPairWithoutPlaintext +// +// # GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [RFC 5280]: https://tools.ietf.org/html/rfc5280 +// [Encryption Context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [Amazon Web Services Nitro Enclaves]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html +// [RFC 5958]: https://tools.ietf.org/html/rfc5958 +// [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html +// [kms:GenerateDataKeyPair]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk +func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) { + if params == nil { + params = &GenerateDataKeyPairInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPair", params, optFns, c.addOperationGenerateDataKeyPairMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. To get the type and origin of your KMS key, use the DescribeKeyoperation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. + // + // The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to + // encrypt and decrypt or to sign and verify (but not both), and the rule that + // permits you to use ECC KMS keys only to sign and verify, are not effective on + // data key pairs, which are used outside of KMS. The SM2 key spec is only + // available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // A signed [attestation document] from an Amazon Web Services Nitro enclave and the encryption + // algorithm to use with the enclave's public key. The only valid encryption + // algorithm is RSAES_OAEP_SHA_256 . + // + // This parameter only supports attestation documents for Amazon Web Services + // Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro + // Enclaves, use the [Amazon Web Services Nitro Enclaves SDK]to generate the attestation document and then use the + // Recipient parameter from any Amazon Web Services SDK to provide the attestation + // document for the enclave. 
+ // + // When you use this parameter, instead of returning a plaintext copy of the + // private data key, KMS encrypts the plaintext private data key under the public + // key in the attestation document, and returns the resulting ciphertext in the + // CiphertextForRecipient field in the response. This ciphertext can be decrypted + // only with the private key in the enclave. The CiphertextBlob field in the + // response contains a copy of the private data key encrypted under the KMS key + // specified by the KeyId parameter. The PrivateKeyPlaintext field in the response + // is null or empty. + // + // For information about the interaction between KMS and Amazon Web Services Nitro + // Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [attestation document]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + // [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk + Recipient *types.RecipientInfo + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairOutput struct { + + // The plaintext private data key encrypted with the public key from the Nitro + // enclave. This ciphertext can be decrypted only by using a private key in the + // Nitro enclave. + // + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + CiphertextForRecipient []byte + + // The Amazon Resource Name ([key ARN] ) of the KMS key that encrypted the private key. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The plaintext copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + // + // If the response includes the CiphertextForRecipient field, the + // PrivateKeyPlaintext field is null or empty. + PrivateKeyPlaintext []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateDataKeyPair"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGenerateDataKeyPairValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPair(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPair(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateDataKeyPair", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go new file mode 100644 index 000000000..5bbc48fda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ 
-0,0 +1,302 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This +// operation returns a plaintext public key and a copy of the private key that is +// encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this +// operation does not return a plaintext private key. The bytes in the keys are +// random; they are not related to the caller or to the KMS key that is used to +// encrypt the private key. +// +// You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to +// encrypt data or verify a signature outside of KMS. Then, store the encrypted +// private key with the data. When you are ready to decrypt data or sign a message, +// you can use the Decryptoperation to decrypt the encrypted private key. +// +// To generate a data key pair, you must specify a symmetric encryption KMS key to +// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key +// or a KMS key in a custom key store. To get the type and origin of your KMS key, +// use the DescribeKeyoperation. +// +// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key +// pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends +// that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either +// encryption or signing, but not both. However, KMS cannot enforce any +// restrictions on the use of data key pairs outside of KMS. +// +// GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each +// request. The bytes in the key are not related to the caller or KMS key that is +// used to encrypt the private key. The public key is a DER-encoded X.509 +// SubjectPublicKeyInfo, as specified in [RFC 5280]. +// +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext , you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException . For more information, see [Encryption Context] in the Key Management +// Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:GenerateDataKeyPairWithoutPlaintext] (key policy) +// +// Related operations: +// +// # Decrypt +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// # GenerateDataKeyWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
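A short sketch of the deferred-use pattern this doc comment describes: take only the DER-encoded public key (a SubjectPublicKeyInfo, which crypto/x509 parses directly) now, store the encrypted private key, and call Decrypt later when the private key is actually needed. The alias and the RSA_2048 key spec are illustrative assumptions.

```
package main

import (
	"context"
	"crypto/rsa"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.GenerateDataKeyPairWithoutPlaintext(ctx, &kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       aws.String("alias/example"), // placeholder symmetric encryption key
		KeyPairSpec: types.DataKeyPairSpecRsa2048,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The PublicKey field is a DER-encoded SubjectPublicKeyInfo; the standard
	// library can parse it without any extra dependencies.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	rsaPub := pub.(*rsa.PublicKey)
	fmt.Printf("got a %d-bit RSA public key; PrivateKeyCiphertextBlob (%d bytes) goes to Decrypt when needed\n",
		rsaPub.N.BitLen(), len(out.PrivateKeyCiphertextBlob))
}
```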
+// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [RFC 5280]: https://tools.ietf.org/html/rfc5280 +// [Encryption Context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [kms:GenerateDataKeyPairWithoutPlaintext]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyPairWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPairWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. To get the type and origin of your KMS key, use the DescribeKeyoperation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. + // + // The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to + // encrypt and decrypt or to sign and verify (but not both), and the rule that + // permits you to use ECC KMS keys only to sign and verify, are not effective on + // data key pairs, which are used outside of KMS. The SM2 key spec is only + // available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. 
When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairWithoutPlaintextOutput struct { + + // The Amazon Resource Name ([key ARN] ) of the KMS key that encrypted the private key. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateDataKeyPairWithoutPlaintext"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateDataKeyPairWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go new file mode 100644 index 000000000..69b955021 
--- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -0,0 +1,302 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a data key that is encrypted under a symmetric encryption KMS key that +// you specify. The bytes in the key are random; they are not related to the caller +// or to the KMS key. +// +// GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it +// does not return a plaintext copy of the data key. +// +// This operation is useful for systems that need to encrypt data at some point, +// but not immediately. When you need to encrypt the data, you call the Decryptoperation +// on the encrypted copy of the key. +// +// It's also useful in distributed systems with different levels of trust. For +// example, you might store encrypted data in containers. One component of your +// system creates new containers and stores an encrypted data key with each +// container. Then, a different component puts the data into the containers. That +// component first decrypts the data key, uses the plaintext data key to encrypt +// data, puts the encrypted data into the container, and then destroys the +// plaintext data key. In this system, the component that creates the containers +// never sees the plaintext data key. +// +// To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations. +// +// To generate a data key, you must specify the symmetric encryption KMS key that +// is used to encrypt the data key. You cannot use an asymmetric KMS key or a key +// in a custom key store to generate a data key. To get the type of your KMS key, +// use the DescribeKeyoperation. +// +// You must also specify the length of the data key. Use either the KeySpec or +// NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use +// the KeySpec parameter. +// +// To generate an SM4 data key (China Regions only), specify a KeySpec value of +// AES_128 or NumberOfBytes value of 16 . The symmetric encryption key used in +// China Regions to encrypt your data key is an SM4 encryption key. +// +// If the operation succeeds, you will find the encrypted copy of the data key in +// the CiphertextBlob field. +// +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext , you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException . For more information, see [Encryption Context] in the Key Management +// Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. 
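The split-trust pattern described above, where one component provisions containers with an encrypted data key and a different component later recovers the key to encrypt data, might look roughly like the sketch below; the package and function names and the key ID are invented for illustration, while the client calls and field names come from the SDK.

```
package containers

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// Provision runs in the component that creates containers. It stores only the
// encrypted data key and never sees the plaintext key.
func Provision(ctx context.Context, client *kms.Client, keyID string) ([]byte, error) {
	out, err := client.GenerateDataKeyWithoutPlaintext(ctx, &kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String(keyID),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}

// Use runs in the component that fills the container. It recovers the
// plaintext data key via Decrypt just before encrypting data locally; the
// caller should zero the returned key when finished with it.
func Use(ctx context.Context, client *kms.Client, encryptedDataKey []byte) ([]byte, error) {
	out, err := client.Decrypt(ctx, &kms.DecryptInput{CiphertextBlob: encryptedDataKey})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}
```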
+// +// Required permissions: [kms:GenerateDataKeyWithoutPlaintext] (key policy) +// +// Related operations: +// +// # Decrypt +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// # GenerateDataKeyPairWithoutPlaintext +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Encryption Context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [kms:GenerateDataKeyWithoutPlaintext]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKeyoperation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Specifies the encryption context that will be used when encrypting the data key. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. 
On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + EncryptionContext map[string]string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // The length of the data key. Use AES_128 to generate a 128-bit symmetric key, or + // AES_256 to generate a 256-bit symmetric key. + KeySpec types.DataKeySpec + + // The length of the data key in bytes. For example, use the value 64 to generate + // a 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit and + // 256-bit symmetric keys), we recommend that you use the KeySpec field instead of + // this one. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateDataKeyWithoutPlaintextOutput struct { + + // The encrypted data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The Amazon Resource Name ([key ARN] ) of the KMS key that encrypted the data key. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateDataKeyWithoutPlaintext"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateDataKeyWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go new file mode 100644 index 000000000..a5505e333 --- /dev/null +++ 
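Editor's note (not part of the vendored file): the GenerateDataKeyWithoutPlaintext documentation above describes the "encrypt later" pattern, where only the encrypted data key is stored and Decrypt is called when the plaintext key is actually needed. A minimal sketch of that flow against this vendored client; the key ARN is a placeholder and the config loader is the standard aws-sdk-go-v2 one:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Placeholder key ARN; any symmetric encryption KMS key works here.
	keyID := "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"

	// Generate an encrypted 256-bit data key; no plaintext copy is returned.
	genOut, err := client.GenerateDataKeyWithoutPlaintext(ctx, &kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String(keyID),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Later, when the data key is actually needed, decrypt the stored blob.
	decOut, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: genOut.CiphertextBlob,
		KeyId:          aws.String(keyID),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered a %d-byte plaintext data key\n", len(decOut.Plaintext))
}
```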
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -0,0 +1,247 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Generates a hash-based message authentication code (HMAC) for a message using +// an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the +// HMAC algorithms that KMS uses conform to industry standards defined in [RFC 2104]. +// +// You can use value that GenerateMac returns in the VerifyMac operation to demonstrate +// that the original message has not changed. Also, because a secret key is used to +// create the hash, you can verify that the party that generated the hash has the +// required secret key. You can also use the raw result to implement HMAC-based +// algorithms such as key derivation functions. This operation is part of KMS +// support for HMAC KMS keys. For details, see [HMAC keys in KMS]in the Key Management Service +// Developer Guide . +// +// Best practices recommend that you limit the time during which any signing +// mechanism, including an HMAC, is effective. This deters an attack where the +// actor uses a signed message to establish validity repeatedly or long after the +// message is superseded. HMAC tags do not include a timestamp, but you can include +// a timestamp in the token or message to help you detect when its time to refresh +// the HMAC. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:GenerateMac] (key policy) +// +// Related operations: VerifyMac +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:GenerateMac]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [RFC 2104]: https://datatracker.ietf.org/doc/html/rfc2104 +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [HMAC keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) { + if params == nil { + params = &GenerateMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateMac", params, optFns, c.addOperationGenerateMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateMacInput struct { + + // The HMAC KMS key to use in the operation. The MAC algorithm computes the HMAC + // for the message and the key as described in [RFC 2104]. + // + // To identify an HMAC KMS key, use the DescribeKey operation and see the KeySpec field in + // the response. 
+ // + // [RFC 2104]: https://datatracker.ietf.org/doc/html/rfc2104 + // + // This member is required. + KeyId *string + + // The MAC algorithm used in the operation. + // + // The algorithm must be compatible with the HMAC KMS key that you specify. To + // find the MAC algorithms that your HMAC KMS key supports, use the DescribeKeyoperation and + // see the MacAlgorithms field in the DescribeKey response. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message to be hashed. Specify a message of up to 4,096 bytes. + // + // GenerateMac and VerifyMac do not provide special handling for message digests. If you + // generate an HMAC for a hash digest of a message, you must verify the HMAC of the + // same hash digest. + // + // This member is required. + Message []byte + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateMacOutput struct { + + // The HMAC KMS key used in the operation. + KeyId *string + + // The hash-based message authentication code (HMAC) that was generated for the + // specified message, HMAC KMS key, and MAC algorithm. + // + // This is the standard, raw HMAC defined in [RFC 2104]. + // + // [RFC 2104]: https://datatracker.ietf.org/doc/html/rfc2104 + Mac []byte + + // The MAC algorithm that was used to generate the HMAC. + MacAlgorithm types.MacAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateMac"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGenerateMacValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateMac(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateMac(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateMac", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go new file mode 100644 index 000000000..3ff869365 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
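Editor's note (not part of the vendored file): GenerateMac pairs with VerifyMac, which the documentation above references; KMS recomputes the HMAC server-side and reports whether it matches. A minimal sketch of that round trip; the key alias and message are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	keyID := "alias/example-hmac-key" // placeholder HMAC KMS key
	msg := []byte("message to authenticate")

	macOut, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String(keyID),
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
		Message:      msg,
	})
	if err != nil {
		log.Fatal(err)
	}

	// VerifyMac recomputes the HMAC inside KMS and compares it to the tag.
	verOut, err := client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String(keyID),
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
		Message:      msg,
		Mac:          macOut.Mac,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MAC valid:", verOut.MacValid)
}
```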
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a random byte string that is cryptographically secure. +// +// You must use the NumberOfBytes parameter to specify the length of the random +// byte string. There is no default value for string length. +// +// By default, the random byte string is generated in KMS. To generate the byte +// string in the CloudHSM cluster associated with an CloudHSM key store, use the +// CustomKeyStoreId parameter. +// +// GenerateRandom also supports [Amazon Web Services Nitro Enclaves], which provide an isolated compute environment in +// Amazon EC2. To call GenerateRandom for a Nitro enclave, use the [Amazon Web Services Nitro Enclaves SDK] or any Amazon +// Web Services SDK. Use the Recipient parameter to provide the attestation +// document for the enclave. Instead of plaintext bytes, the response includes the +// plaintext bytes encrypted under the public key from the attestation document ( +// CiphertextForRecipient ).For information about the interaction between KMS and +// Amazon Web Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer +// Guide. +// +// For more information about entropy and random number generation, see [Key Management Service Cryptographic Details]. +// +// Cross-account use: Not applicable. GenerateRandom does not use any +// account-specific resources, such as KMS keys. +// +// Required permissions: [kms:GenerateRandom] (IAM policy) +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [kms:GenerateRandom]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Amazon Web Services Nitro Enclaves]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html +// [Key Management Service Cryptographic Details]: https://docs.aws.amazon.com/kms/latest/cryptographic-details/ +// [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk +func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) { + if params == nil { + params = &GenerateRandomInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateRandom", params, optFns, c.addOperationGenerateRandomMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateRandomOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateRandomInput struct { + + // Generates the random byte string in the CloudHSM cluster that is associated + // with the specified CloudHSM key store. To find the ID of a custom key store, use + // the DescribeCustomKeyStoresoperation. + // + // External key store IDs are not valid for this parameter. If you specify the ID + // of an external key store, GenerateRandom throws an UnsupportedOperationException + // . + CustomKeyStoreId *string + + // The length of the random byte string. 
This parameter is required. + NumberOfBytes *int32 + + // A signed [attestation document] from an Amazon Web Services Nitro enclave and the encryption + // algorithm to use with the enclave's public key. The only valid encryption + // algorithm is RSAES_OAEP_SHA_256 . + // + // This parameter only supports attestation documents for Amazon Web Services + // Nitro Enclaves. To include this parameter, use the [Amazon Web Services Nitro Enclaves SDK]or any Amazon Web Services + // SDK. + // + // When you use this parameter, instead of returning plaintext bytes, KMS encrypts + // the plaintext bytes under the public key in the attestation document, and + // returns the resulting ciphertext in the CiphertextForRecipient field in the + // response. This ciphertext can be decrypted only with the private key in the + // enclave. The Plaintext field in the response is null or empty. + // + // For information about the interaction between KMS and Amazon Web Services Nitro + // Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [attestation document]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + // [Amazon Web Services Nitro Enclaves SDK]: https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk + Recipient *types.RecipientInfo + + noSmithyDocumentSerde +} + +type GenerateRandomOutput struct { + + // The plaintext random bytes encrypted with the public key from the Nitro + // enclave. This ciphertext can be decrypted only by using a private key in the + // Nitro enclave. + // + // This field is included in the response only when the Recipient parameter in the + // request includes a valid attestation document from an Amazon Web Services Nitro + // enclave. For information about the interaction between KMS and Amazon Web + // Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS]in the Key Management Service Developer Guide. + // + // [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html + CiphertextForRecipient []byte + + // The random byte string. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + // + // If the response includes the CiphertextForRecipient field, the Plaintext field + // is null or empty. + Plaintext []byte + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GenerateRandom"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateRandom(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateRandom(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GenerateRandom", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go new file mode 100644 index 000000000..b0368d534 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
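Editor's note (not part of the vendored file): GenerateRandom has no default length, so NumberOfBytes must always be set, as the documentation above notes. A minimal sketch requesting 32 random bytes; configuration loading is the standard aws-sdk-go-v2 path:

```
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// Request 32 cryptographically secure random bytes from KMS.
	out, err := client.GenerateRandom(ctx, &kms.GenerateRandomInput{
		NumberOfBytes: aws.Int32(32),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(out.Plaintext))
}
```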
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a key policy attached to the specified KMS key. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:GetKeyPolicy] (key policy) +// +// Related operations: [PutKeyPolicy] +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [kms:GetKeyPolicy]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) { + if params == nil { + params = &GetKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyPolicy", params, optFns, c.addOperationGetKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyPolicyInput struct { + + // Gets the key policy for the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Specifies the name of the key policy. If no policy name is specified, the + // default value is default . The only valid name is default . To get the names of + // key policies, use ListKeyPolicies. + PolicyName *string + + noSmithyDocumentSerde +} + +type GetKeyPolicyOutput struct { + + // A key policy document in JSON format. + Policy *string + + // The name of the key policy. The only valid value is default . + PolicyName *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetKeyPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetKeyPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetKeyPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go new file mode 100644 index 000000000..0484bb59c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -0,0 +1,258 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
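Editor's note (not part of the vendored file): GetKeyPolicy returns the JSON policy document, and "default" is the only valid policy name, per the documentation above. A minimal sketch; the key ID is a placeholder:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.GetKeyPolicy(ctx, &kms.GetKeyPolicyInput{
		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
		PolicyName: aws.String("default"),                              // the only valid policy name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.Policy)) // JSON key policy document
}
```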
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Provides detailed information about the rotation status for a KMS key, +// including whether [automatic rotation of the key material]is enabled for the specified KMS key, the [rotation period], and the next +// scheduled rotation date. +// +// Automatic key rotation is supported only on [symmetric encryption KMS keys]. You cannot enable automatic +// rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To enable or disable +// automatic rotation of a set of related [multi-Region keys], set the property on the primary key.. +// +// You can enable (EnableKeyRotation ) and disable automatic rotation (DisableKeyRotation ) of the key material in +// customer managed KMS keys. Key material rotation of [Amazon Web Services managed KMS keys]is not configurable. KMS +// always rotates the key material in Amazon Web Services managed KMS keys every +// year. The key rotation status for Amazon Web Services managed KMS keys is always +// true . +// +// You can perform on-demand (RotateKeyOnDemand ) rotation of the key material in customer managed +// KMS keys, regardless of whether or not automatic key rotation is enabled. You +// can use GetKeyRotationStatus to identify the date and time that an in progress +// on-demand rotation was initiated. You can use ListKeyRotationsto view the details of completed +// rotations. +// +// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed +// keys from every three years to every year. For details, see EnableKeyRotation. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// - Disabled: The key rotation status does not change when you disable a KMS +// key. However, while the KMS key is disabled, KMS does not rotate the key +// material. When you re-enable the KMS key, rotation resumes. If the key material +// in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it +// immediately, and every year thereafter. If it's been less than a year since the +// key material in the re-enabled KMS key was rotated, the KMS key resumes its +// prior rotation schedule. +// +// - Pending deletion: While a KMS key is pending deletion, its key rotation +// status is false and KMS does not rotate the key material. If you cancel the +// deletion, the original key rotation status returns to true . +// +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. +// +// Required permissions: [kms:GetKeyRotationStatus] (key policy) +// +// Related operations: +// +// # DisableKeyRotation +// +// # EnableKeyRotation +// +// # ListKeyRotations +// +// # RotateKeyOnDemand +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +// [rotation period]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotation-period +// [Amazon Web Services managed KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [kms:GetKeyRotationStatus]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [automatic rotation of the key material]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html +// [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [symmetric encryption KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks +// [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { + if params == nil { + params = &GetKeyRotationStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyRotationStatus", params, optFns, c.addOperationGetKeyRotationStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyRotationStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyRotationStatusInput struct { + + // Gets the rotation status for the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type GetKeyRotationStatusOutput struct { + + // Identifies the specified symmetric encryption KMS key. + KeyId *string + + // A Boolean value that specifies whether key rotation is enabled. + KeyRotationEnabled bool + + // The next date that KMS will automatically rotate the key material. + NextRotationDate *time.Time + + // Identifies the date and time that an in progress on-demand rotation was + // initiated. + // + // The KMS API follows an [eventual consistency] model due to the distributed nature of the system. As a + // result, there might be a slight delay between initiating on-demand key rotation + // and the rotation's completion. Once the on-demand rotation is complete, use ListKeyRotationsto + // view the details of the on-demand rotation. + // + // [eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html + OnDemandRotationStartDate *time.Time + + // The number of days between each automatic rotation. The default value is 365 + // days. 
+ RotationPeriodInDays *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetKeyRotationStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetKeyRotationStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyRotationStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyRotationStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetKeyRotationStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go new file mode 100644 index 000000000..e3ffaca7a --- /dev/null +++ 
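Editor's note (not part of the vendored file): GetKeyRotationStatus now reports the rotation period and next rotation date alongside the enabled flag, which is new relative to the v1 client that sops previously pulled in. A minimal sketch reading those fields; the key ID is a placeholder:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	out, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rotation enabled:", out.KeyRotationEnabled)
	if out.NextRotationDate != nil {
		fmt.Println("next rotation:", out.NextRotationDate)
	}
	if out.RotationPeriodInDays != nil {
		fmt.Println("rotation period (days):", *out.RotationPeriodInDays)
	}
}
```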
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -0,0 +1,306 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns the public key and an import token you need to import or reimport key +// material for a KMS key. +// +// By default, KMS keys are created with key material that KMS generates. This +// operation supports [Importing key material], an advanced feature that lets you generate and import the +// cryptographic key material for a KMS key. For more information about importing +// key material into KMS, see [Importing key material]in the Key Management Service Developer Guide. +// +// Before calling GetParametersForImport , use the CreateKey operation with an Origin value +// of EXTERNAL to create a KMS key with no key material. You can import key +// material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption +// KMS key, or asymmetric signing KMS key. You can also import key material into a [multi-Region key] +// of any supported type. However, you can't import key material into a KMS key in +// a [custom key store]. You can also use GetParametersForImport to get a public key and import +// token to [reimport the original key material]into a KMS key whose key material expired or was deleted. +// +// GetParametersForImport returns the items that you need to import your key +// material. +// +// - The public key (or "wrapping key") of an RSA key pair that KMS generates. +// +// You will use this public key to encrypt ("wrap") your key material while it's +// +// in transit to KMS. +// +// - A import token that ensures that KMS can decrypt your key material and +// associate it with the correct KMS key. +// +// The public key and its import token are permanently linked and must be used +// together. Each public key and import token set is valid for 24 hours. The +// expiration date and time appear in the ParametersValidTo field in the +// GetParametersForImport response. You cannot use an expired public key or import +// token in an ImportKeyMaterialrequest. If your key and token expire, send another +// GetParametersForImport request. +// +// GetParametersForImport requires the following information: +// +// - The key ID of the KMS key for which you are importing the key material. +// +// - The key spec of the public key ("wrapping key") that you will use to +// encrypt your key material during import. +// +// - The wrapping algorithm that you will use with the public key to encrypt +// your key material. +// +// You can use the same or a different public key spec and wrapping algorithm each +// time you import or reimport the same key material. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:GetParametersForImport] (key policy) +// +// Related operations: +// +// # ImportKeyMaterial +// +// # DeleteImportedKeyMaterial +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [Importing key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [kms:GetParametersForImport]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [reimport the original key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [multi-Region key]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) { + if params == nil { + params = &GetParametersForImportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetParametersForImport", params, optFns, c.addOperationGetParametersForImportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetParametersForImportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetParametersForImportInput struct { + + // The identifier of the KMS key that will be associated with the imported key + // material. The Origin of the KMS key must be EXTERNAL . + // + // All KMS key types are supported, including multi-Region keys. However, you + // cannot import key material into a KMS key in a custom key store. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The algorithm you will use with the RSA public key ( PublicKey ) in the response + // to protect your key material during import. For more information, see Select a wrapping algorithmin the + // Key Management Service Developer Guide. + // + // For RSA_AES wrapping algorithms, you encrypt your key material with an AES key + // that you generate, then encrypt your AES key with the RSA public key from KMS. + // For RSAES wrapping algorithms, you encrypt your key material directly with the + // RSA public key from KMS. + // + // The wrapping algorithms that you can use depend on the type of key material + // that you are importing. To import an RSA private key, you must use an RSA_AES + // wrapping algorithm. + // + // - RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material. + // + // - RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material. + // + // - RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA + // key material (private key). + // + // You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 + // wrapping key spec to wrap ECC_NIST_P521 key material. + // + // - RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key + // material (private key). + // + // You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 + // wrapping key spec to wrap ECC_NIST_P521 key material. 
+ // + // - RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not + // support the RSAES_PKCS1_V1_5 wrapping algorithm. + // + // This member is required. + WrappingAlgorithm types.AlgorithmSpec + + // The type of RSA public key to return in the response. You will use this + // wrapping key with the specified wrapping algorithm to protect your key material + // during import. + // + // Use the longest RSA wrapping key that is practical. + // + // You cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private + // key. Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public + // key. + // + // This member is required. + WrappingKeySpec types.WrappingKeySpec + + noSmithyDocumentSerde +} + +type GetParametersForImportOutput struct { + + // The import token to send in a subsequent ImportKeyMaterial request. + ImportToken []byte + + // The Amazon Resource Name ([key ARN] ) of the KMS key to use in a subsequent ImportKeyMaterial request. + // This is the same KMS key specified in the GetParametersForImport request. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The time at which the import token and public key are no longer valid. After + // this time, you cannot use them to make an ImportKeyMaterialrequest and you must send another + // GetParametersForImport request to get new ones. + ParametersValidTo *time.Time + + // The public key to use to encrypt the key material before importing it with ImportKeyMaterial. + PublicKey []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetParametersForImport"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + 
if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetParametersForImportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetParametersForImport(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetParametersForImport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetParametersForImport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go new file mode 100644 index 000000000..245cf4610 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -0,0 +1,297 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the public key of an asymmetric KMS key. Unlike the private key of a +// asymmetric KMS key, which never leaves KMS unencrypted, callers with +// kms:GetPublicKey permission can download the public key of an asymmetric KMS +// key. You can share the public key to allow others to encrypt messages and verify +// signatures outside of KMS. For information about asymmetric KMS keys, see [Asymmetric KMS keys]in +// the Key Management Service Developer Guide. +// +// You do not need to download the public key. Instead, you can use the public key +// within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an +// asymmetric KMS key. When you use the public key within KMS, you benefit from the +// authentication, authorization, and logging that are part of every KMS operation. +// You also reduce of risk of encrypting data that cannot be decrypted. These +// features are not effective outside of KMS. +// +// To help you use the public key safely outside of KMS, GetPublicKey returns +// important information about the public key in the response, including: +// +// [KeySpec] +// - : The type of key material in the public key, such as RSA_4096 or +// ECC_NIST_P521 . +// +// [KeyUsage] +// - : Whether the key is used for encryption, signing, or deriving a shared +// secret. +// +// [EncryptionAlgorithms] +// - or [SigningAlgorithms]: A list of the encryption algorithms or the signing algorithms for the +// key. 
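Editor's note (not part of the vendored file): GetParametersForImport hands back an RSA wrapping public key plus an import token, and the caller encrypts ("wraps") the key material locally before sending it to ImportKeyMaterial. A sketch of that flow, assuming an EXTERNAL-origin key (placeholder ID), the RSAES_OAEP_SHA_256 wrapping algorithm, and the standard library OAEP implementation:

```
package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// The KMS key must have Origin=EXTERNAL; the ID here is a placeholder.
	out, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
		WrappingKeySpec:   types.WrappingKeySpecRsa2048,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Parse the DER-encoded wrapping public key returned by KMS.
	pubAny, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	rsaPub := pubAny.(*rsa.PublicKey)

	// Wrap locally generated 256-bit key material with RSAES_OAEP_SHA_256.
	keyMaterial := make([]byte, 32)
	if _, err := rand.Read(keyMaterial); err != nil {
		log.Fatal(err)
	}
	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, keyMaterial, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("import token %d bytes, wrapped key %d bytes, valid until %v\n",
		len(out.ImportToken), len(wrapped), out.ParametersValidTo)
	// The wrapped key and out.ImportToken would then be passed to ImportKeyMaterial.
}
```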
+// +// Although KMS cannot enforce these restrictions on external operations, it is +// crucial that you use this information to prevent the public key from being used +// improperly. For example, you can prevent a public signing key from being used +// encrypt data, or prevent a public key from being used with an encryption +// algorithm that is not supported by KMS. You can also avoid errors, such as using +// the wrong signing algorithm in a verification operation. +// +// To verify a signature outside of KMS with an SM2 public key (China Regions +// only), you must specify the distinguishing ID. By default, KMS uses +// 1234567812345678 as the distinguishing ID. For more information, see [Offline verification with SM2 key pairs]. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:GetPublicKey] (key policy) +// +// Related operations: CreateKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [SigningAlgorithms]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:GetPublicKey]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [EncryptionAlgorithms]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms +// [Asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [KeySpec]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec +// [Offline verification with SM2 key pairs]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification +// [KeyUsage]: https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) { + if params == nil { + params = &GetPublicKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetPublicKey", params, optFns, c.addOperationGetPublicKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetPublicKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetPublicKeyInput struct { + + // Identifies the asymmetric KMS key that includes the public key. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type GetPublicKeyOutput struct { + + // Instead, use the KeySpec field in the GetPublicKey response. + // + // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend + // that you use the KeySpec field in your code. However, to avoid breaking + // changes, KMS supports both fields. + // + // Deprecated: This field has been deprecated. Instead, use the KeySpec field. + CustomerMasterKeySpec types.CustomerMasterKeySpec + + // The encryption algorithms that KMS supports for this key. + // + // This information is critical. If a public key encrypts data outside of KMS by + // using an unsupported encryption algorithm, the ciphertext cannot be decrypted. + // + // This field appears in the response only when the KeyUsage of the public key is + // ENCRYPT_DECRYPT . + EncryptionAlgorithms []types.EncryptionAlgorithmSpec + + // The key agreement algorithm used to derive a shared secret. This field is + // present only when the KMS key has a KeyUsage value of KEY_AGREEMENT . + KeyAgreementAlgorithms []types.KeyAgreementAlgorithmSpec + + // The Amazon Resource Name ([key ARN] ) of the asymmetric KMS key from which the public key + // was downloaded. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The type of the of the public key that was downloaded. + KeySpec types.KeySpec + + // The permitted use of the public key. Valid values for asymmetric key pairs are + // ENCRYPT_DECRYPT , SIGN_VERIFY , and KEY_AGREEMENT . + // + // This information is critical. For example, if a public key with SIGN_VERIFY key + // usage encrypts data outside of KMS, the ciphertext cannot be decrypted. + KeyUsage types.KeyUsageType + + // The exported public key. + // + // The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo + // (SPKI), as defined in [RFC 5280]. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + // + // [RFC 5280]: https://tools.ietf.org/html/rfc5280 + PublicKey []byte + + // The signing algorithms that KMS supports for this key. + // + // This field appears in the response only when the KeyUsage of the public key is + // SIGN_VERIFY . + SigningAlgorithms []types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetPublicKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPublicKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetPublicKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go new file mode 100644 index 000000000..ee7037a49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -0,0 +1,321 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
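Before the ImportKeyMaterial operation below, here is a small sketch of the GetPublicKey call documented above, including parsing the DER-encoded SubjectPublicKeyInfo with the standard library. The alias used as KeyId is a placeholder; everything else follows the input/output shapes shown in this file.

```
package main

import (
	"context"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Download the public half of an asymmetric KMS key. The alias is a placeholder.
	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{
		KeyId: aws.String("alias/ExampleAlias"),
	})
	if err != nil {
		log.Fatalf("GetPublicKey: %v", err)
	}

	// Record the restrictions KMS reports so the key is not misused offline.
	fmt.Printf("key spec: %s, key usage: %s\n", out.KeySpec, out.KeyUsage)
	fmt.Printf("signing algorithms: %v\n", out.SigningAlgorithms)

	// PublicKey is DER-encoded SubjectPublicKeyInfo; when returned through the
	// SDK it is raw bytes, so it can be parsed directly with crypto/x509.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatalf("parse public key: %v", err)
	}
	fmt.Printf("parsed public key of type %T\n", pub)
}
```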
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Imports or reimports key material into an existing KMS key that was created +// without key material. ImportKeyMaterial also sets the expiration model and +// expiration date of the imported key material. +// +// By default, KMS keys are created with key material that KMS generates. This +// operation supports [Importing key material], an advanced feature that lets you generate and import the +// cryptographic key material for a KMS key. For more information about importing +// key material into KMS, see [Importing key material]in the Key Management Service Developer Guide. +// +// After you successfully import key material into a KMS key, you can [reimport the same key material] into that +// KMS key, but you cannot import different key material. You might reimport key +// material to replace key material that expired or key material that you deleted. +// You might also reimport key material to change the expiration model or +// expiration date of the key material. +// +// Each time you import key material into KMS, you can determine whether ( +// ExpirationModel ) and when ( ValidTo ) the key material expires. To change the +// expiration of your key material, you must import it again, either by calling +// ImportKeyMaterial or using the import features of the KMS console. +// +// Before calling ImportKeyMaterial : +// +// - Create or identify a KMS key with no key material. The KMS key must have an +// Origin value of EXTERNAL , which indicates that the KMS key is designed for +// imported key material. +// +// To create an new KMS key for imported key material, call the CreateKeyoperation with an +// +// Origin value of EXTERNAL . You can create a symmetric encryption KMS key, HMAC +// KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can +// also import key material into a multi-Region keyof any supported type. However, you can't +// import key material into a KMS key in a custom key store. +// +// - Use the DescribeKeyoperation to verify that the KeyState of the KMS key is +// PendingImport , which indicates that the KMS key has no key material. +// +// If you are reimporting the same key material into an existing KMS key, you +// +// might need to call the DeleteImportedKeyMaterialto delete its existing key material. +// +// - Call the GetParametersForImportoperation to get a public key and import token set for importing +// key material. +// +// - Use the public key in the GetParametersForImportresponse to encrypt your key material. +// +// Then, in an ImportKeyMaterial request, you submit your encrypted key material +// and import token. When calling this operation, you must specify the following +// values: +// +// - The key ID or key ARN of the KMS key to associate with the imported key +// material. Its Origin must be EXTERNAL and its KeyState must be PendingImport . +// You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a +// different Amazon Web Services account. To get the Origin and KeyState of a KMS +// key, call DescribeKey. +// +// - The encrypted key material. +// +// - The import token that GetParametersForImportreturned. You must use a public key and token from +// the same GetParametersForImport response. 
+// +// - Whether the key material expires ( ExpirationModel ) and, if so, when ( +// ValidTo ). For help with this choice, see [Setting an expiration time]in the Key Management Service +// Developer Guide. +// +// If you set an expiration date, KMS deletes the key material from the KMS key on +// +// the specified date, making the KMS key unusable. To use the KMS key in +// cryptographic operations again, you must reimport the same key material. +// However, you can delete and reimport the key material at any time, including +// before the key material expires. Each time you reimport, you can eliminate or +// reset the expiration time. +// +// When this operation is successful, the key state of the KMS key changes from +// PendingImport to Enabled , and you can use the KMS key in cryptographic +// operations. +// +// If this operation fails, use the exception to help determine the problem. If +// the error is related to the key material, the import token, or wrapping key, use +// GetParametersForImportto get a new public key and import token for the KMS key and repeat the import +// procedure. For help, see [How To Import Key Material]in the Key Management Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:ImportKeyMaterial] (key policy) +// +// Related operations: +// +// # DeleteImportedKeyMaterial +// +// # GetParametersForImport +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Importing key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [How To Import Key Material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview +// [kms:ImportKeyMaterial]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [reimport the same key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material +// [Setting an expiration time]: https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) { + if params == nil { + params = &ImportKeyMaterialInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ImportKeyMaterial", params, optFns, c.addOperationImportKeyMaterialMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ImportKeyMaterialOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ImportKeyMaterialInput struct { + + // The encrypted key material to import. The key material must be encrypted under + // the public wrapping key that GetParametersForImportreturned, using the wrapping algorithm that you + // specified in the same GetParametersForImport request. + // + // This member is required. 
+ EncryptedKeyMaterial []byte + + // The import token that you received in the response to a previous GetParametersForImport request. It + // must be from the same response that contained the public key that you used to + // encrypt the key material. + // + // This member is required. + ImportToken []byte + + // The identifier of the KMS key that will be associated with the imported key + // material. This must be the same KMS key specified in the KeyID parameter of the + // corresponding GetParametersForImportrequest. The Origin of the KMS key must be EXTERNAL and its + // KeyState must be PendingImport . + // + // The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric + // encryption KMS key, or asymmetric signing KMS key, including a multi-Region keyof any supported + // type. You cannot perform this operation on a KMS key in a custom key store, or + // on a KMS key in a different Amazon Web Services account. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES + // . For help with this choice, see [Setting an expiration time]in the Key Management Service Developer Guide. + // + // When the value of ExpirationModel is KEY_MATERIAL_EXPIRES , you must specify a + // value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE , + // you must omit the ValidTo parameter. + // + // You cannot change the ExpirationModel or ValidTo values for the current import + // after the request completes. To change either value, you must reimport the key + // material. + // + // [Setting an expiration time]: https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration + ExpirationModel types.ExpirationModelType + + // The date and time when the imported key material expires. This parameter is + // required when the value of the ExpirationModel parameter is KEY_MATERIAL_EXPIRES + // . Otherwise it is not valid. + // + // The value of this parameter must be a future date and time. The maximum value + // is 365 days from the request date. + // + // When the key material expires, KMS deletes the key material from the KMS key. + // Without its key material, the KMS key is unusable. To use the KMS key in + // cryptographic operations, you must reimport the same key material. + // + // You cannot change the ExpirationModel or ValidTo values for the current import + // after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial ) and + // reimport the key material. + ValidTo *time.Time + + noSmithyDocumentSerde +} + +type ImportKeyMaterialOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ImportKeyMaterial"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpImportKeyMaterialValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportKeyMaterial(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opImportKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ImportKeyMaterial", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go new file mode 100644 index 000000000..e7f68db3f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -0,0 +1,326 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
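To tie the two operations together, here is a hedged sketch of the import flow the ImportKeyMaterial doc comment describes: fetch wrapping parameters, wrap the key material with RSAES-OAEP-SHA-256 in Go, then submit the wrapped material with the matching import token. The key ID is a placeholder, and the `types.AlgorithmSpec*`, `types.WrappingKeySpec*`, and `types.ExpirationModelType*` constant names are assumed from the SDK's naming convention.

```
package main

import (
	"context"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// importKeyMaterial wraps symmetric key material under the RSA public key
// returned by GetParametersForImport and submits it to ImportKeyMaterial.
// It assumes the KMS key was created with Origin=EXTERNAL.
func importKeyMaterial(ctx context.Context, client *kms.Client, keyID string, material []byte) error {
	// Step 1: get a wrapping key and import token for this KMS key.
	params, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String(keyID),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256, // assumed constant name
		WrappingKeySpec:   types.WrappingKeySpecRsa4096,       // assumed constant name
	})
	if err != nil {
		return fmt.Errorf("get parameters for import: %w", err)
	}

	// Step 2: encrypt the key material under the returned public key using
	// RSAES-OAEP with SHA-256, matching the requested wrapping algorithm.
	pub, err := x509.ParsePKIXPublicKey(params.PublicKey)
	if err != nil {
		return fmt.Errorf("parse wrapping key: %w", err)
	}
	rsaPub, ok := pub.(*rsa.PublicKey)
	if !ok {
		return fmt.Errorf("expected RSA wrapping key, got %T", pub)
	}
	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, material, nil)
	if err != nil {
		return fmt.Errorf("wrap key material: %w", err)
	}

	// Step 3: submit the wrapped material and the token from the same
	// GetParametersForImport response.
	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		ImportToken:          params.ImportToken,
		EncryptedKeyMaterial: wrapped,
		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire, // assumed constant name
	})
	if err != nil {
		return fmt.Errorf("import key material: %w", err)
	}
	return nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	material := make([]byte, 32) // 256-bit symmetric key material
	if _, err := rand.Read(material); err != nil {
		log.Fatal(err)
	}
	// Placeholder ID of a KMS key created with Origin=EXTERNAL.
	if err := importKeyMaterial(ctx, kms.NewFromConfig(cfg), "1234abcd-12ab-34cd-56ef-1234567890ab", material); err != nil {
		log.Fatal(err)
	}
}
```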
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of aliases in the caller's Amazon Web Services account and region. +// For more information about aliases, see CreateAlias. +// +// By default, the ListAliases operation returns all aliases in the account and +// region. To get only the aliases associated with a particular KMS key, use the +// KeyId parameter. +// +// The ListAliases response can include aliases that you created and associated +// with your customer managed keys, and aliases that Amazon Web Services created +// and associated with Amazon Web Services managed keys in your account. You can +// recognize Amazon Web Services aliases because their names have the format aws/ , +// such as aws/dynamodb . +// +// The response might also include aliases that have no TargetKeyId field. These +// are predefined aliases that Amazon Web Services has created but has not yet +// associated with a KMS key. Aliases that Amazon Web Services creates in your +// account, including predefined aliases, do not count against your [KMS aliases quota]. +// +// Cross-account use: No. ListAliases does not return aliases in other Amazon Web +// Services accounts. +// +// Required permissions: [kms:ListAliases] (IAM policy) +// +// For details, see [Controlling access to aliases] in the Key Management Service Developer Guide. +// +// Related operations: +// +// # CreateAlias +// +// # DeleteAlias +// +// # UpdateAlias +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [KMS aliases quota]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit +// [kms:ListAliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Controlling access to aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access +func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) { + if params == nil { + params = &ListAliasesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAliases", params, optFns, c.addOperationListAliasesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAliasesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAliasesInput struct { + + // Lists only aliases that are associated with the specified KMS key. Enter a KMS + // key in your Amazon Web Services account. + // + // This parameter is optional. If you omit it, ListAliases returns all aliases in + // the account and Region. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + KeyId *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. 
+ // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListAliasesOutput struct { + + // A list of aliases. + Aliases []types.AliasListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAliases"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAliases(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != 
nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListAliasesPaginatorOptions is the paginator options for ListAliases +type ListAliasesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAliasesPaginator is a paginator for ListAliases +type ListAliasesPaginator struct { + options ListAliasesPaginatorOptions + client ListAliasesAPIClient + params *ListAliasesInput + nextToken *string + firstPage bool +} + +// NewListAliasesPaginator returns a new ListAliasesPaginator +func NewListAliasesPaginator(client ListAliasesAPIClient, params *ListAliasesInput, optFns ...func(*ListAliasesPaginatorOptions)) *ListAliasesPaginator { + if params == nil { + params = &ListAliasesInput{} + } + + options := ListAliasesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAliasesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAliasesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAliases page. +func (p *ListAliasesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAliasesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListAliases(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListAliasesAPIClient is a client that implements the ListAliases operation. 
+type ListAliasesAPIClient interface { + ListAliases(context.Context, *ListAliasesInput, ...func(*Options)) (*ListAliasesOutput, error) +} + +var _ ListAliasesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListAliases(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAliases", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go new file mode 100644 index 000000000..bd9433fa6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -0,0 +1,335 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all grants for the specified KMS key. +// +// You must specify the KMS key in all requests. You can filter the grant list by +// grant ID or grantee principal. +// +// For detailed information about grants, including grant terminology, see [Grants in KMS] in the +// Key Management Service Developer Guide . For examples of working with grants in +// several programming languages, see [Programming grants]. +// +// The GranteePrincipal field in the ListGrants response usually contains the user +// or role designated as the grantee principal in the grant. However, when the +// grantee principal in the grant is an Amazon Web Services service, the +// GranteePrincipal field contains the [service principal], which might represent several different +// grantee principals. +// +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. +// +// Required permissions: [kms:ListGrants] (key policy) +// +// Related operations: +// +// # CreateGrant +// +// # ListRetirableGrants +// +// # RetireGrant +// +// # RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Programming grants]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html +// [service principal]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services +// [Grants in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html +// [kms:ListGrants]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) { + if params == nil { + params = &ListGrantsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListGrants", params, optFns, c.addOperationListGrantsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListGrantsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListGrantsInput struct { + + // Returns only grants for the specified KMS key. This parameter is required. + // + // Specify the key ID or key ARN of the KMS key. 
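A short sketch of the paginator just defined for ListAliases; the same HasMorePages/NextPage pattern repeats for ListGrants, ListKeyPolicies, and ListKeyRotations later in this patch. The `AliasName` and `TargetKeyId` fields of `types.AliasListEntry` are assumed from the KMS API shape rather than shown in this diff.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// The paginator wraps the Marker/NextMarker bookkeeping shown above.
	p := kms.NewListAliasesPaginator(client, &kms.ListAliasesInput{
		Limit: aws.Int32(50),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatalf("ListAliases: %v", err)
		}
		for _, alias := range page.Aliases {
			// TargetKeyId is nil for predefined aliases that Amazon Web
			// Services has not yet associated with a KMS key.
			fmt.Printf("%s -> %s\n", aws.ToString(alias.AliasName), aws.ToString(alias.TargetKeyId))
		}
	}
}
```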
To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Returns only the grant with the specified grant ID. The grant ID uniquely + // identifies the grant. + GrantId *string + + // Returns only grants where the specified principal is the grantee principal for + // the grant. + GranteePrincipal *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListGrantsOutput struct { + + // A list of grants. + Grants []types.GrantListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
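The ListGrantsOutput fields above (Truncated, NextMarker) also support manual pagination when the paginator is not used. Below is a hedged sketch of that loop; the key ARN is a placeholder, and the `GrantId`/`GranteePrincipal` fields of `types.GrantListEntry` are assumed from the KMS API rather than shown in this diff.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// Placeholder key ARN; a key ARN is required for cross-account calls.
	input := &kms.ListGrantsInput{
		KeyId: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		Limit: aws.Int32(100),
	}

	// Manual pagination: keep requesting pages while Truncated is true,
	// feeding NextMarker back in as Marker.
	for {
		out, err := client.ListGrants(ctx, input)
		if err != nil {
			log.Fatalf("ListGrants: %v", err)
		}
		for _, g := range out.Grants {
			fmt.Printf("grant %s for %s\n", aws.ToString(g.GrantId), aws.ToString(g.GranteePrincipal))
		}
		if !out.Truncated {
			break
		}
		input.Marker = out.NextMarker
	}
}
```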
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListGrants"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListGrantsPaginatorOptions is the paginator options for ListGrants +type ListGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListGrantsPaginator is a paginator for ListGrants +type ListGrantsPaginator struct { + options ListGrantsPaginatorOptions + client ListGrantsAPIClient + params *ListGrantsInput + nextToken *string + firstPage bool +} + +// NewListGrantsPaginator returns a new ListGrantsPaginator +func NewListGrantsPaginator(client ListGrantsAPIClient, params *ListGrantsInput, optFns ...func(*ListGrantsPaginatorOptions)) *ListGrantsPaginator { + if params == nil { + params = &ListGrantsInput{} + } + + options := ListGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListGrants page. +func (p *ListGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListGrants(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListGrantsAPIClient is a client that implements the ListGrants operation. +type ListGrantsAPIClient interface { + ListGrants(context.Context, *ListGrantsInput, ...func(*Options)) (*ListGrantsOutput, error) +} + +var _ ListGrantsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go new file mode 100644 index 000000000..c68867a53 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -0,0 +1,312 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the names of the key policies that are attached to a KMS key. This +// operation is designed to get policy names that you can use in a GetKeyPolicyoperation. +// However, the only valid policy name is default . +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:ListKeyPolicies] (key policy) +// +// Related operations: +// +// # GetKeyPolicy +// +// [PutKeyPolicy] +// +// Eventual consistency: The KMS API follows an eventual consistency model. 
For +// more information, see [KMS eventual consistency]. +// +// [kms:ListKeyPolicies]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { + if params == nil { + params = &ListKeyPoliciesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListKeyPolicies", params, optFns, c.addOperationListKeyPoliciesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListKeyPoliciesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListKeyPoliciesInput struct { + + // Gets the names of key policies for the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. + // + // Only one policy can be attached to a key. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListKeyPoliciesOutput struct { + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A list of key policy names. The only valid value is default . + PolicyNames []string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
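Since ListKeyPolicies exists mainly to feed policy names into GetKeyPolicy (as the doc comment above notes), here is a hedged sketch combining the two. The key ID is a placeholder, and the Go shape of GetKeyPolicy (its input fields and `Policy` output field) is assumed from the KMS API, since that operation is only referenced, not defined, in this hunk.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	keyID := "1234abcd-12ab-34cd-56ef-1234567890ab" // placeholder

	// List the policy names for the key; the only valid name is "default".
	names, err := client.ListKeyPolicies(ctx, &kms.ListKeyPoliciesInput{
		KeyId: aws.String(keyID),
	})
	if err != nil {
		log.Fatalf("ListKeyPolicies: %v", err)
	}

	// Fetch each named policy document with GetKeyPolicy (shape assumed).
	for _, name := range names.PolicyNames {
		policy, err := client.GetKeyPolicy(ctx, &kms.GetKeyPolicyInput{
			KeyId:      aws.String(keyID),
			PolicyName: aws.String(name),
		})
		if err != nil {
			log.Fatalf("GetKeyPolicy(%s): %v", name, err)
		}
		fmt.Printf("policy %q:\n%s\n", name, aws.ToString(policy.Policy))
	}
}
```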
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListKeyPolicies"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListKeyPoliciesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyPolicies(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListKeyPoliciesPaginatorOptions is the paginator options for ListKeyPolicies +type ListKeyPoliciesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. + // + // Only one policy can be attached to a key. 
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListKeyPoliciesPaginator is a paginator for ListKeyPolicies +type ListKeyPoliciesPaginator struct { + options ListKeyPoliciesPaginatorOptions + client ListKeyPoliciesAPIClient + params *ListKeyPoliciesInput + nextToken *string + firstPage bool +} + +// NewListKeyPoliciesPaginator returns a new ListKeyPoliciesPaginator +func NewListKeyPoliciesPaginator(client ListKeyPoliciesAPIClient, params *ListKeyPoliciesInput, optFns ...func(*ListKeyPoliciesPaginatorOptions)) *ListKeyPoliciesPaginator { + if params == nil { + params = &ListKeyPoliciesInput{} + } + + options := ListKeyPoliciesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeyPoliciesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeyPoliciesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeyPolicies page. +func (p *ListKeyPoliciesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListKeyPolicies(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListKeyPoliciesAPIClient is a client that implements the ListKeyPolicies +// operation. +type ListKeyPoliciesAPIClient interface { + ListKeyPolicies(context.Context, *ListKeyPoliciesInput, ...func(*Options)) (*ListKeyPoliciesOutput, error) +} + +var _ ListKeyPoliciesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListKeyPolicies(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListKeyPolicies", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go new file mode 100644 index 000000000..eed276b15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyRotations.go @@ -0,0 +1,318 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about all completed key material rotations for the +// specified KMS key. +// +// You must specify the KMS key in all requests. 
You can refine the key rotations +// list by limiting the number of rotations returned. +// +// For detailed information about automatic and on-demand key rotations, see [Rotating KMS keys] in +// the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:ListKeyRotations] (key policy) +// +// Related operations: +// +// # EnableKeyRotation +// +// # DisableKeyRotation +// +// # GetKeyRotationStatus +// +// # RotateKeyOnDemand +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Rotating KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html +// [kms:ListKeyRotations]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ListKeyRotations(ctx context.Context, params *ListKeyRotationsInput, optFns ...func(*Options)) (*ListKeyRotationsOutput, error) { + if params == nil { + params = &ListKeyRotationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListKeyRotations", params, optFns, c.addOperationListKeyRotationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListKeyRotationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListKeyRotationsInput struct { + + // Gets the key rotations for the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListKeyRotationsOutput struct { + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A list of completed key material rotations. + Rotations []types.RotationsListEntry + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
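A brief sketch of listing completed key material rotations with the ListKeyRotations paginator defined later in this file. The key ID is a placeholder, and each `types.RotationsListEntry` is printed generically rather than assuming its field names, which are not shown in this diff.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := kms.NewFromConfig(cfg)

	// List completed rotations for a key; the key ID is a placeholder.
	p := kms.NewListKeyRotationsPaginator(client, &kms.ListKeyRotationsInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatalf("ListKeyRotations: %v", err)
		}
		for _, rotation := range page.Rotations {
			// Print each rotation entry generically to avoid assuming the
			// RotationsListEntry field names.
			fmt.Printf("%+v\n", rotation)
		}
	}
}
```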
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeyRotationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeyRotations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeyRotations{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListKeyRotations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListKeyRotationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyRotations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListKeyRotationsPaginatorOptions is the paginator options for ListKeyRotations +type ListKeyRotationsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. 
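+//
+// An illustrative caller-side sketch of driving this paginator (assumes a
+// configured *kms.Client named "client", a context.Context named "ctx", and a
+// key identifier string named "keyID"; aws.String comes from
+// github.com/aws/aws-sdk-go-v2/aws):
+//
+//	total := 0
+//	p := kms.NewListKeyRotationsPaginator(client, &kms.ListKeyRotationsInput{
+//		KeyId: aws.String(keyID),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		total += len(page.Rotations)
+//	}
+//	// total now holds the number of completed key material rotations.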
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListKeyRotationsPaginator is a paginator for ListKeyRotations +type ListKeyRotationsPaginator struct { + options ListKeyRotationsPaginatorOptions + client ListKeyRotationsAPIClient + params *ListKeyRotationsInput + nextToken *string + firstPage bool +} + +// NewListKeyRotationsPaginator returns a new ListKeyRotationsPaginator +func NewListKeyRotationsPaginator(client ListKeyRotationsAPIClient, params *ListKeyRotationsInput, optFns ...func(*ListKeyRotationsPaginatorOptions)) *ListKeyRotationsPaginator { + if params == nil { + params = &ListKeyRotationsInput{} + } + + options := ListKeyRotationsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeyRotationsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeyRotationsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeyRotations page. +func (p *ListKeyRotationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeyRotationsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListKeyRotations(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListKeyRotationsAPIClient is a client that implements the ListKeyRotations +// operation. +type ListKeyRotationsAPIClient interface { + ListKeyRotations(context.Context, *ListKeyRotationsInput, ...func(*Options)) (*ListKeyRotationsOutput, error) +} + +var _ ListKeyRotationsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListKeyRotations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListKeyRotations", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go new file mode 100644 index 000000000..9c3860fcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all KMS keys in the caller's Amazon Web Services account and +// Region. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. 
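+//
+// An illustrative caller-side sketch of enumerating keys with the paginator,
+// overriding the page size through the paginator options (assumes "client" is
+// a configured *kms.Client and "ctx" is a context.Context; aws.ToString comes
+// from github.com/aws/aws-sdk-go-v2/aws):
+//
+//	p := kms.NewListKeysPaginator(client, &kms.ListKeysInput{},
+//		func(o *kms.ListKeysPaginatorOptions) {
+//			o.Limit = 1000
+//			o.StopOnDuplicateToken = true
+//		})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, key := range page.Keys {
+//			fmt.Println(aws.ToString(key.KeyArn))
+//		}
+//	}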
+// +// Required permissions: [kms:ListKeys] (IAM policy) +// +// Related operations: +// +// # CreateKey +// +// # DescribeKey +// +// # ListAliases +// +// # ListResourceTags +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [kms:ListKeys]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) { + if params == nil { + params = &ListKeysInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListKeys", params, optFns, c.addOperationListKeysMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListKeysOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListKeysInput struct { + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListKeysOutput struct { + + // A list of KMS keys. + Keys []types.KeyListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListKeys"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeys(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListKeysPaginatorOptions is the paginator options for ListKeys +type ListKeysPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 1000, + // inclusive. If you do not include a value, it defaults to 100. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListKeysPaginator is a paginator for ListKeys +type ListKeysPaginator struct { + options ListKeysPaginatorOptions + client ListKeysAPIClient + params *ListKeysInput + nextToken *string + firstPage bool +} + +// NewListKeysPaginator returns a new ListKeysPaginator +func NewListKeysPaginator(client ListKeysAPIClient, params *ListKeysInput, optFns ...func(*ListKeysPaginatorOptions)) *ListKeysPaginator { + if params == nil { + params = &ListKeysInput{} + } + + options := ListKeysPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeysPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeysPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeys page. +func (p *ListKeysPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeysOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListKeys(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListKeysAPIClient is a client that implements the ListKeys operation. +type ListKeysAPIClient interface { + ListKeys(context.Context, *ListKeysInput, ...func(*Options)) (*ListKeysOutput, error) +} + +var _ ListKeysAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListKeys(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListKeys", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go new file mode 100644 index 000000000..7bdf6304c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -0,0 +1,326 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns all tags on the specified KMS key. +// +// For general information about tags, including the format and syntax, see [Tagging Amazon Web Services resources] in +// the Amazon Web Services General Reference. For information about using tags in +// KMS, see [Tagging keys]. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. 
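+//
+// An illustrative caller-side sketch of reading every tag on a key (assumes
+// "client" is a configured *kms.Client, "ctx" is a context.Context, and
+// "keyID" identifies the key; the aws helpers come from
+// github.com/aws/aws-sdk-go-v2/aws):
+//
+//	p := kms.NewListResourceTagsPaginator(client, &kms.ListResourceTagsInput{
+//		KeyId: aws.String(keyID),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, tag := range page.Tags {
+//			fmt.Printf("%s=%s\n", aws.ToString(tag.TagKey), aws.ToString(tag.TagValue))
+//		}
+//	}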
+// +// Required permissions: [kms:ListResourceTags] (key policy) +// +// Related operations: +// +// # CreateKey +// +// # ReplicateKey +// +// # TagResource +// +// # UntagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Tagging keys]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html +// [kms:ListResourceTags]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Tagging Amazon Web Services resources]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html +func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { + if params == nil { + params = &ListResourceTagsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListResourceTags", params, optFns, c.addOperationListResourceTagsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListResourceTagsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListResourceTagsInput struct { + + // Gets tags on the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 50, + // inclusive. If you do not include a value, it defaults to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + // + // Do not attempt to construct this value. Use only the value of NextMarker from + // the truncated response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListResourceTagsOutput struct { + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + // + // Do not assume or infer any information from this value. + NextMarker *string + + // A list of tags. Each tag consists of a tag key and a tag value. + // + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see [ABAC for KMS]in the Key Management Service Developer Guide. + // + // [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + Tags []types.Tag + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListResourceTags"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListResourceTagsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceTags(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListResourceTagsPaginatorOptions is the paginator options for ListResourceTags +type ListResourceTagsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 50, + // inclusive. If you do not include a value, it defaults to 50. 
+ Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListResourceTagsPaginator is a paginator for ListResourceTags +type ListResourceTagsPaginator struct { + options ListResourceTagsPaginatorOptions + client ListResourceTagsAPIClient + params *ListResourceTagsInput + nextToken *string + firstPage bool +} + +// NewListResourceTagsPaginator returns a new ListResourceTagsPaginator +func NewListResourceTagsPaginator(client ListResourceTagsAPIClient, params *ListResourceTagsInput, optFns ...func(*ListResourceTagsPaginatorOptions)) *ListResourceTagsPaginator { + if params == nil { + params = &ListResourceTagsInput{} + } + + options := ListResourceTagsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListResourceTagsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListResourceTagsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListResourceTags page. +func (p *ListResourceTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListResourceTags(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListResourceTagsAPIClient is a client that implements the ListResourceTags +// operation. +type ListResourceTagsAPIClient interface { + ListResourceTags(context.Context, *ListResourceTagsInput, ...func(*Options)) (*ListResourceTagsOutput, error) +} + +var _ ListResourceTagsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListResourceTags(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListResourceTags", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go new file mode 100644 index 000000000..7fe68d8e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -0,0 +1,332 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about all grants in the Amazon Web Services account and +// Region that have the specified retiring principal. 
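+//
+// An illustrative caller-side sketch (assumes "client" is a configured
+// *kms.Client, "ctx" is a context.Context, and the retiring principal ARN is
+// an example value; the aws helpers come from
+// github.com/aws/aws-sdk-go-v2/aws):
+//
+//	p := kms.NewListRetirableGrantsPaginator(client, &kms.ListRetirableGrantsInput{
+//		RetiringPrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, grant := range page.Grants {
+//			fmt.Println(aws.ToString(grant.GrantId), aws.ToString(grant.KeyId))
+//		}
+//	}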
+// +// You can specify any principal in your Amazon Web Services account. The grants +// that are returned include grants for KMS keys in your Amazon Web Services +// account and other Amazon Web Services accounts. You might use this operation to +// determine which grants you may retire. To retire a grant, use the RetireGrantoperation. +// +// For detailed information about grants, including grant terminology, see [Grants in KMS] in the +// Key Management Service Developer Guide . For examples of working with grants in +// several programming languages, see [Programming grants]. +// +// Cross-account use: You must specify a principal in your Amazon Web Services +// account. This operation returns a list of grants where the retiring principal +// specified in the ListRetirableGrants request is the same retiring principal on +// the grant. This can include grants on KMS keys owned by other Amazon Web +// Services accounts, but you do not need kms:ListRetirableGrants permission (or +// any other additional permission) in any Amazon Web Services account other than +// your own. +// +// Required permissions: [kms:ListRetirableGrants] (IAM policy) in your Amazon Web Services account. +// +// KMS authorizes ListRetirableGrants requests by evaluating the caller account's +// kms:ListRetirableGrants permissions. The authorized resource in +// ListRetirableGrants calls is the retiring principal specified in the request. +// KMS does not evaluate the caller's permissions to verify their access to any KMS +// keys or grants that might be returned by the ListRetirableGrants call. +// +// Related operations: +// +// # CreateGrant +// +// # ListGrants +// +// # RetireGrant +// +// # RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Programming grants]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html +// [kms:ListRetirableGrants]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Grants in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { + if params == nil { + params = &ListRetirableGrantsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListRetirableGrants", params, optFns, c.addOperationListRetirableGrantsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListRetirableGrantsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListRetirableGrantsInput struct { + + // The retiring principal for which to list grants. Enter a principal in your + // Amazon Web Services account. + // + // To specify the retiring principal, use the [Amazon Resource Name (ARN)] of an Amazon Web Services + // principal. Valid principals include Amazon Web Services accounts, IAM users, IAM + // roles, federated users, and assumed role users. For help with the ARN syntax for + // a principal, see [IAM ARNs]in the Identity and Access Management User Guide . 
+ // + // [IAM ARNs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns + // [Amazon Resource Name (ARN)]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // + // This member is required. + RetiringPrincipal *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated + // response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListRetirableGrantsOutput struct { + + // A list of grants. + Grants []types.GrantListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListRetirableGrants"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpListRetirableGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRetirableGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListRetirableGrantsPaginatorOptions is the paginator options for +// ListRetirableGrants +type ListRetirableGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and 100, + // inclusive. If you do not include a value, it defaults to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListRetirableGrantsPaginator is a paginator for ListRetirableGrants +type ListRetirableGrantsPaginator struct { + options ListRetirableGrantsPaginatorOptions + client ListRetirableGrantsAPIClient + params *ListRetirableGrantsInput + nextToken *string + firstPage bool +} + +// NewListRetirableGrantsPaginator returns a new ListRetirableGrantsPaginator +func NewListRetirableGrantsPaginator(client ListRetirableGrantsAPIClient, params *ListRetirableGrantsInput, optFns ...func(*ListRetirableGrantsPaginatorOptions)) *ListRetirableGrantsPaginator { + if params == nil { + params = &ListRetirableGrantsInput{} + } + + options := ListRetirableGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListRetirableGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListRetirableGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListRetirableGrants page. +func (p *ListRetirableGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListRetirableGrants(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListRetirableGrantsAPIClient is a client that implements the +// ListRetirableGrants operation. +type ListRetirableGrantsAPIClient interface { + ListRetirableGrants(context.Context, *ListRetirableGrantsInput, ...func(*Options)) (*ListRetirableGrantsOutput, error) +} + +var _ ListRetirableGrantsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListRetirableGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListRetirableGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go new file mode 100644 index 000000000..4957cdee9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Attaches a key policy to the specified KMS key. +// +// For more information about key policies, see [Key Policies] in the Key Management Service +// Developer Guide. For help writing and formatting a JSON policy document, see the +// [IAM JSON Policy Reference]in the Identity and Access Management User Guide . For examples of adding a key +// policy in multiple programming languages, see [Setting a key policy]in the Key Management Service +// Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:PutKeyPolicy] (key policy) +// +// Related operations: GetKeyPolicy +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [IAM JSON Policy Reference]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html +// [kms:PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Setting a key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy +// [Key Policies]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) { + if params == nil { + params = &PutKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutKeyPolicy", params, optFns, c.addOperationPutKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutKeyPolicyInput struct { + + // Sets the key policy on the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The key policy to attach to the KMS key. + // + // The key policy must meet the following criteria: + // + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see [Default key policy]in the Key Management Service + // Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck + // to true.) + // + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. For more information, see [Changes that I make are not always immediately visible]in the Amazon Web + // Services Identity and Access Management User Guide. + // + // A key policy document can include only the following characters: + // + // - Printable ASCII characters from the space character ( \u0020 ) through the + // end of the ASCII character range. + // + // - Printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF ). + // + // - The tab ( \u0009 ), line feed ( \u000A ), and carriage return ( \u000D ) + // special characters + // + // For information about key policies, see [Key policies in KMS] in the Key Management Service + // Developer Guide.For help writing and formatting a JSON policy document, see the [IAM JSON Policy Reference] + // in the Identity and Access Management User Guide . + // + // [Key policies in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html + // [IAM JSON Policy Reference]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + // [Changes that I make are not always immediately visible]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency + // + // This member is required. + Policy *string + + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. + // + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, see [Default key policy] in the Key Management Service Developer Guide. + // + // Use this parameter only when you intend to prevent the principal that is making + // the request from making a subsequent [PutKeyPolicy]request on the KMS key. + // + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + // [PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html + BypassPolicyLockoutSafetyCheck bool + + // The name of the key policy. If no policy name is specified, the default value + // is default . The only valid value is default . 
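+//
+// An illustrative caller-side sketch of attaching a policy (assumes "client"
+// is a configured *kms.Client, "ctx" is a context.Context, and "keyID" and
+// "policyJSON" hold the key identifier and the policy document; aws.String
+// comes from github.com/aws/aws-sdk-go-v2/aws):
+//
+//	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
+//		KeyId:  aws.String(keyID),
+//		Policy: aws.String(policyJSON),
+//		// PolicyName is omitted; it defaults to "default".
+//	})
+//	if err != nil {
+//		return err
+//	}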
+ PolicyName *string + + noSmithyDocumentSerde +} + +type PutKeyPolicyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutKeyPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutKeyPolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutKeyPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpPutKeyPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutKeyPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutKeyPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutKeyPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go new file mode 100644 index 000000000..7d6f278de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go 
@@ -0,0 +1,401 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use +// this operation to change the KMS key under which data is encrypted, such as when +// you [manually rotate]a KMS key or change the KMS key that protects a ciphertext. You can also +// use it to reencrypt ciphertext under the same KMS key, such as to change the [encryption context]of +// a ciphertext. +// +// The ReEncrypt operation can decrypt ciphertext that was encrypted by using a +// KMS key in an KMS operation, such as Encryptor GenerateDataKey. It can also decrypt ciphertext that +// was encrypted by using the public key of an [asymmetric KMS key]outside of KMS. However, it cannot +// decrypt ciphertext produced by other libraries, such as the [Amazon Web Services Encryption SDK]or [Amazon S3 client-side encryption]. These +// libraries return a ciphertext format that is incompatible with KMS. +// +// When you use the ReEncrypt operation, you need to provide information for the +// decrypt operation and the subsequent encrypt operation. +// +// - If your ciphertext was encrypted under an asymmetric KMS key, you must use +// the SourceKeyId parameter to identify the KMS key that encrypted the +// ciphertext. You must also supply the encryption algorithm that was used. This +// information is required to decrypt the data. +// +// - If your ciphertext was encrypted under a symmetric encryption KMS key, the +// SourceKeyId parameter is optional. KMS can get this information from metadata +// that it adds to the symmetric ciphertext blob. This feature adds durability to +// your implementation by ensuring that authorized users can decrypt ciphertext +// decades after it was encrypted, even if they've lost track of the key ID. +// However, specifying the source KMS key is always recommended as a best practice. +// When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the +// KMS key you specify. If the ciphertext was encrypted under a different KMS key, +// the ReEncrypt operation fails. This practice ensures that you use the KMS key +// that you intend. +// +// - To reencrypt the data, you must use the DestinationKeyId parameter to +// specify the KMS key that re-encrypts the data after it is decrypted. If the +// destination KMS key is an asymmetric KMS key, you must also provide the +// encryption algorithm. The algorithm that you choose must be compatible with the +// KMS key. +// +// When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to +// +// record the KMS key and encryption algorithm that you choose. You will be +// required to provide the same KMS key and encryption algorithm when you decrypt +// the data. If the KMS key and algorithm do not match the values used to encrypt +// the data, the decrypt operation fails. +// +// You are not required to supply the key ID and encryption algorithm when you +// +// decrypt with symmetric encryption KMS keys because KMS stores this information +// in the ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. 
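+//
+// An illustrative caller-side sketch of moving a symmetric ciphertext to a new
+// KMS key (assumes "client" is a configured *kms.Client, "ctx" is a
+// context.Context, "blob" holds the existing ciphertext, and the destination
+// alias is an example value; aws.String comes from
+// github.com/aws/aws-sdk-go-v2/aws):
+//
+//	out, err := client.ReEncrypt(ctx, &kms.ReEncryptInput{
+//		CiphertextBlob:   blob,
+//		DestinationKeyId: aws.String("alias/ExampleAlias"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	reencrypted := out.CiphertextBlob // now protected by the destination key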
+// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. The source KMS key and destination KMS key can be in +// different Amazon Web Services accounts. Either or both KMS keys can be in a +// different account than the caller. To specify a KMS key in a different account, +// you must use its key ARN or alias ARN. +// +// Required permissions: +// +// [kms:ReEncryptFrom] +// - permission on the source KMS key (key policy) +// +// [kms:ReEncryptTo] +// - permission on the destination KMS key (key policy) +// +// To permit reencryption from or to a KMS key, include the "kms:ReEncrypt*" +// permission in your [key policy]. This permission is automatically included in the key +// policy when you use the console to create a KMS key. But you must include it +// manually when you create a KMS key programmatically or when you use the PutKeyPolicy +// operation to set a key policy. +// +// Related operations: +// +// # Decrypt +// +// # Encrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyPair +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Amazon Web Services Encryption SDK]: https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/ +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [asymmetric KMS key]: https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks +// [key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html +// [Amazon S3 client-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html +// [kms:ReEncryptTo]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [manually rotate]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually +// [kms:ReEncryptFrom]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) { + if params == nil { + params = &ReEncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReEncrypt", params, optFns, c.addOperationReEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReEncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReEncryptInput struct { + + // Ciphertext of the data to reencrypt. + // + // This member is required. + CiphertextBlob []byte + + // A unique identifier for the KMS key that is used to reencrypt the data. Specify + // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value + // of ENCRYPT_DECRYPT . To find the KeyUsage value of a KMS key, use the DescribeKey + // operation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + DestinationKeyId *string + + // Specifies the encryption algorithm that KMS will use to reecrypt the data after + // it has decrypted it. The default value, SYMMETRIC_DEFAULT , represents the + // encryption algorithm used for symmetric encryption KMS keys. + // + // This parameter is required only when the destination KMS key is an asymmetric + // KMS key. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies that encryption context to use when the reencrypting the data. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // A destination encryption context is valid only when the destination KMS key is + // a symmetric encryption KMS key. The standard ciphertext format for asymmetric + // KMS keys does not include fields for metadata. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + DestinationEncryptionContext map[string]string + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext + // before it is reencrypted. The default value, SYMMETRIC_DEFAULT , represents the + // algorithm used for symmetric encryption KMS keys. + // + // Specify the same algorithm that was used to encrypt the ciphertext. If you + // specify a different algorithm, the decrypt attempt fails. + // + // This parameter is required only when the ciphertext was encrypted under an + // asymmetric KMS key. 
+ SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use to decrypt the ciphertext. Enter the + // same encryption context that was used to encrypt the ciphertext. + // + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. + // + // For more information, see [Encryption context] in the Key Management Service Developer Guide. + // + // [Encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context + SourceEncryptionContext map[string]string + + // Specifies the KMS key that KMS will use to decrypt the ciphertext before it is + // re-encrypted. + // + // Enter a key ID of the KMS key that was used to encrypt the ciphertext. If you + // identify a different KMS key, the ReEncrypt operation throws an + // IncorrectKeyException . + // + // This parameter is required only when the ciphertext was encrypted under an + // asymmetric KMS key. If you used a symmetric encryption KMS key, KMS can get the + // KMS key from metadata that it adds to the symmetric ciphertext blob. However, it + // is always recommended as a best practice. This practice ensures that you use the + // KMS key that you intend. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + SourceKeyId *string + + noSmithyDocumentSerde +} + +type ReEncryptOutput struct { + + // The reencrypted data. When you use the HTTP API or the Amazon Web Services CLI, + // the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to reencrypt the data. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name ([key ARN] ) of the KMS key that was used to reencrypt the data. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The encryption algorithm that was used to decrypt the ciphertext before it was + // reencrypted. + SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Unique identifier of the KMS key used to originally encrypt the data. + SourceKeyId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ReEncrypt"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpReEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ReEncrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go new file mode 100644 index 000000000..4c5bb7b17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -0,0 +1,398 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
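+//
+// For illustration, a minimal sketch of replicating a multi-Region primary key
+// into a second Region, assuming ctx is a context.Context, cfg was loaded with
+// config.LoadDefaultConfig(ctx) (github.com/aws/aws-sdk-go-v2/config), and
+// aws.String comes from github.com/aws/aws-sdk-go-v2/aws; the key ARN and Region
+// are placeholders:
+//
+//	client := kms.NewFromConfig(cfg)
+//	out, err := client.ReplicateKey(ctx, &kms.ReplicateKeyInput{
+//		KeyId:         aws.String("arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab"),
+//		ReplicaRegion: aws.String("ap-southeast-2"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.ReplicaKeyMetadata describes the new replica key, which starts in the
+//	// transient Creating key state.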
+
+package kms
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Replicates a multi-Region key into the specified Region. This operation creates
+// a multi-Region replica key based on a multi-Region primary key in a different
+// Region of the same Amazon Web Services partition. You can create multiple
+// replicas of a primary key, but each must be in a different Region. To create a
+// multi-Region primary key, use the CreateKeyoperation.
+//
+// This operation supports multi-Region keys, a KMS feature that lets you create
+// multiple interoperable KMS keys in different Amazon Web Services Regions.
+// Because these KMS keys have the same key ID, key material, and other metadata,
+// you can use them interchangeably to encrypt data in one Amazon Web Services
+// Region and decrypt it in a different Amazon Web Services Region without
+// re-encrypting the data or making a cross-Region call. For more information about
+// multi-Region keys, see [Multi-Region keys in KMS]in the Key Management Service Developer Guide.
+//
+// A replica key is a fully-functional KMS key that can be used independently of
+// its primary and peer replica keys. A primary key and its replica keys share
+// properties that make them interoperable. They have the same [key ID]and key material.
+// They also have the same [key spec], [key usage], [key material origin], and [automatic key rotation status]. KMS automatically synchronizes these shared
+// properties among related multi-Region keys. All other properties of a replica
+// key can differ, including its [key policy], [tags], [aliases], and [Key states of KMS keys]. KMS pricing and quotas for KMS keys
+// apply to each primary key and replica key.
+//
+// When this operation completes, the new replica key has a transient key state of
+// Creating . This key state changes to Enabled (or PendingImport ) after a few
+// seconds when the process of creating the new replica key is complete. While the
+// key state is Creating , you can manage the key, but you cannot yet use it in
+// cryptographic operations. If you are creating and using the replica key
+// programmatically, retry on KMSInvalidStateException or call DescribeKey to
+// check its KeyState value before using it. For details about the Creating key
+// state, see [Key states of KMS keys]in the Key Management Service Developer Guide.
+//
+// You cannot create more than one replica of a primary key in any Region. If the
+// Region already includes a replica of the key you're trying to replicate,
+// ReplicateKey returns an AlreadyExistsException error. If the key state of the
+// existing replica is PendingDeletion , you can cancel the scheduled key deletion (CancelKeyDeletion
+// ) or wait for the key to be deleted. The new replica key you create will have
+// the same [shared properties]as the original replica key.
+//
+// The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation
+// in the primary key's Region and a CreateKeyoperation in the replica key's Region.
+//
+// If you replicate a multi-Region primary key with imported key material, the
+// replica key is created with no key material. You must import the same key
+// material that you imported into the primary key. 
For details, see [Importing key material into multi-Region keys]in the Key +// Management Service Developer Guide. +// +// To convert a replica key to a primary key, use the UpdatePrimaryRegion operation. +// +// ReplicateKey uses different default values for the KeyPolicy and Tags +// parameters than those used in the KMS console. For details, see the parameter +// descriptions. +// +// Cross-account use: No. You cannot use this operation to create a replica key in +// a different Amazon Web Services account. +// +// Required permissions: +// +// - kms:ReplicateKey on the primary key (in the primary key's Region). Include +// this permission in the primary key's key policy. +// +// - kms:CreateKey in an IAM policy in the replica Region. +// +// - To use the Tags parameter, kms:TagResource in an IAM policy in the replica +// Region. +// +// # Related operations +// +// # CreateKey +// +// # UpdatePrimaryRegion +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [key ID]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id +// [automatic key rotation status]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html +// [aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html +// [key usage]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage +// [Multi-Region keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html +// [key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [tags]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [key spec]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec +// [Importing key material into multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html +// [key material origin]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin +// [shared properties]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties +func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) { + if params == nil { + params = &ReplicateKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReplicateKey", params, optFns, c.addOperationReplicateKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReplicateKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReplicateKeyInput struct { + + // Identifies the multi-Region primary key that is being replicated. To determine + // whether a KMS key is a multi-Region primary key, use the DescribeKeyoperation to check the + // value of the MultiRegionKeyType property. + // + // Specify the key ID or key ARN of a multi-Region primary key. + // + // For example: + // + // - Key ID: mrk-1234abcd12ab34cd56ef1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. 
+ KeyId *string + + // The Region ID of the Amazon Web Services Region for this replica key. + // + // Enter the Region ID, such as us-east-1 or ap-southeast-2 . For a list of Amazon + // Web Services Regions in which KMS is supported, see [KMS service endpoints]in the Amazon Web Services + // General Reference. + // + // HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try + // to replicate an HMAC KMS key in an Amazon Web Services Region in which HMAC keys + // are not supported, the ReplicateKey operation returns an + // UnsupportedOperationException . For a list of Regions in which HMAC KMS keys are + // supported, see [HMAC keys in KMS]in the Key Management Service Developer Guide. + // + // The replica must be in a different Amazon Web Services Region than its primary + // key and other replicas of that primary key, but in the same Amazon Web Services + // partition. KMS must be available in the replica Region. If the Region is not + // enabled by default, the Amazon Web Services account must be enabled in the + // Region. For information about Amazon Web Services partitions, see [Amazon Resource Names (ARNs)]in the Amazon + // Web Services General Reference. For information about enabling and disabling + // Regions, see [Enabling a Region]and [Disabling a Region] in the Amazon Web Services General Reference. + // + // [Disabling a Region]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable + // [Enabling a Region]: https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable + // [KMS service endpoints]: https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region + // [HMAC keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + // [Amazon Resource Names (ARNs)]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // + // This member is required. + ReplicaRegion *string + + // Skips ("bypasses") the key policy lockout safety check. The default value is + // false. + // + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, see [Default key policy] in the Key Management Service Developer Guide. + // + // Use this parameter only when you intend to prevent the principal that is making + // the request from making a subsequent [PutKeyPolicy]request on the KMS key. + // + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + // [PutKeyPolicy]: https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html + BypassPolicyLockoutSafetyCheck bool + + // A description of the KMS key. The default value is an empty string (no + // description). + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // The description is not a shared property of multi-Region keys. You can specify + // the same description or a different description for each key in a set of related + // multi-Region keys. KMS does not synchronize this property. + Description *string + + // The key policy to attach to the KMS key. This parameter is optional. If you do + // not provide a key policy, KMS attaches the [default key policy]to the KMS key. + // + // The key policy is not a shared property of multi-Region keys. 
You can specify + // the same key policy or a different key policy for each key in a set of related + // multi-Region keys. KMS does not synchronize this property. + // + // If you provide a key policy, it must meet the following criteria: + // + // - The key policy must allow the calling principal to make a subsequent + // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key + // becomes unmanageable. For more information, see [Default key policy]in the Key Management Service + // Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck + // to true.) + // + // - Each statement in the key policy must contain one or more principals. The + // principals in the key policy must exist and be visible to KMS. When you create a + // new Amazon Web Services principal, you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to KMS. For more information, see [Changes that I make are not always immediately visible]in the Amazon Web + // Services Identity and Access Management User Guide. + // + // A key policy document can include only the following characters: + // + // - Printable ASCII characters from the space character ( \u0020 ) through the + // end of the ASCII character range. + // + // - Printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF ). + // + // - The tab ( \u0009 ), line feed ( \u000A ), and carriage return ( \u000D ) + // special characters + // + // For information about key policies, see [Key policies in KMS] in the Key Management Service + // Developer Guide. For help writing and formatting a JSON policy document, see the + // [IAM JSON Policy Reference]in the Identity and Access Management User Guide . + // + // [Key policies in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html + // [default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + // [IAM JSON Policy Reference]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html + // [Default key policy]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key + // [Changes that I make are not always immediately visible]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency + Policy *string + + // Assigns one or more tags to the replica key. Use this parameter to tag the KMS + // key when it is created. To tag an existing KMS key, use the TagResourceoperation. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see [ABAC for KMS]in the Key Management Service Developer Guide. + // + // To use this parameter, you must have [kms:TagResource] permission in an IAM policy. + // + // Tags are not a shared property of multi-Region keys. You can specify the same + // tags or different tags for each key in a set of related multi-Region keys. KMS + // does not synchronize this property. + // + // Each tag consists of a tag key and a tag value. Both the tag key and the tag + // value are required, but the tag value can be an empty (null) string. You cannot + // have more than one tag on a KMS key with the same tag key. 
If you specify an + // existing tag key with a different tag value, KMS replaces the current tag value + // with the specified one. + // + // When you add tags to an Amazon Web Services resource, Amazon Web Services + // generates a cost allocation report with usage and costs aggregated by tags. Tags + // can also be used to control access to a KMS key. For details, see [Tagging Keys]. + // + // [kms:TagResource]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + // [Tagging Keys]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html + // [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html + Tags []types.Tag + + noSmithyDocumentSerde +} + +type ReplicateKeyOutput struct { + + // Displays details about the new replica key, including its Amazon Resource Name ([key ARN] + // ) and [Key states of KMS keys]. It also includes the ARN and Amazon Web Services Region of its primary + // key and other replica keys. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + // [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + ReplicaKeyMetadata *types.KeyMetadata + + // The key policy of the new replica key. The value is a key policy document in + // JSON format. + ReplicaPolicy *string + + // The tags on the new replica key. The value is a list of tag key and tag value + // pairs. + ReplicaTags []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ReplicateKey"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpReplicateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplicateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReplicateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ReplicateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go new file mode 100644 index 000000000..016739eab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -0,0 +1,213 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a grant. Typically, you retire a grant when you no longer need its +// permissions. To identify the grant to retire, use a [grant token], or both the grant ID and +// a key identifier (key ID or key ARN) of the KMS key. The CreateGrantoperation returns both +// values. +// +// This operation can be called by the retiring principal for a grant, by the +// grantee principal if the grant allows the RetireGrant operation, and by the +// Amazon Web Services account in which the grant is created. It can also be called +// by principals to whom permission for retiring a grant is delegated. For details, +// see [Retiring and revoking grants]in the Key Management Service Developer Guide. +// +// For detailed information about grants, including grant terminology, see [Grants in KMS] in the +// Key Management Service Developer Guide . For examples of working with grants in +// several programming languages, see [Programming grants]. +// +// Cross-account use: Yes. You can retire a grant on a KMS key in a different +// Amazon Web Services account. +// +// Required permissions: Permission to retire a grant is determined primarily by +// the grant. For details, see [Retiring and revoking grants]in the Key Management Service Developer Guide. +// +// Related operations: +// +// # CreateGrant +// +// # ListGrants +// +// # ListRetirableGrants +// +// # RevokeGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
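+//
+// A minimal sketch of retiring a grant by key ARN and grant ID, assuming ctx is a
+// context.Context, cfg was loaded with config.LoadDefaultConfig(ctx)
+// (github.com/aws/aws-sdk-go-v2/config), and aws.String comes from
+// github.com/aws/aws-sdk-go-v2/aws; the identifiers are placeholders (a grant
+// token alone would also work):
+//
+//	client := kms.NewFromConfig(cfg)
+//	_, err := client.RetireGrant(ctx, &kms.RetireGrantInput{
+//		KeyId:   aws.String("arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		GrantId: aws.String("0123456789012345678901234567890123456789012345678901234567890123"),
+//	})
+//	if err != nil {
+//		return err
+//	}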
+// +// [Programming grants]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html +// [grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token +// [Retiring and revoking grants]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete +// [Grants in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) { + if params == nil { + params = &RetireGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RetireGrant", params, optFns, c.addOperationRetireGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RetireGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RetireGrantInput struct { + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants, or ListRetirableGrants. + // + // - Grant ID Example - + // 0123456789012345678901234567890123456789012345678901234567890123 + GrantId *string + + // Identifies the grant to be retired. You can use a grant token to identify a new + // grant even before it has achieved eventual consistency. + // + // Only the CreateGrant operation returns a grant token. For details, see [Grant token] and [Eventual consistency] in the Key + // Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency + GrantToken *string + + // The key ARN KMS key associated with the grant. To find the key ARN, use the ListKeys + // operation. + // + // For example: + // arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string + + noSmithyDocumentSerde +} + +type RetireGrantOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RetireGrant"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetireGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRetireGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RetireGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go new file mode 100644 index 000000000..c97a332bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -0,0 +1,215 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified grant. You revoke a grant to terminate the permissions +// that the grant allows. For more information, see [Retiring and revoking grants]in the Key Management Service +// Developer Guide . +// +// When you create, retire, or revoke a grant, there might be a brief delay, +// usually less than five minutes, until the grant is available throughout KMS. +// This state is known as eventual consistency. For details, see [Eventual consistency]in the Key +// Management Service Developer Guide . +// +// For detailed information about grants, including grant terminology, see [Grants in KMS] in the +// Key Management Service Developer Guide . For examples of working with grants in +// several programming languages, see [Programming grants]. +// +// Cross-account use: Yes. To perform this operation on a KMS key in a different +// Amazon Web Services account, specify the key ARN in the value of the KeyId +// parameter. +// +// Required permissions: [kms:RevokeGrant] (key policy). +// +// Related operations: +// +// # CreateGrant +// +// # ListGrants +// +// # ListRetirableGrants +// +// # RetireGrant +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency +// [Programming grants]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html +// [kms:RevokeGrant]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [Retiring and revoking grants]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete +// [Grants in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) { + if params == nil { + params = &RevokeGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RevokeGrant", params, optFns, c.addOperationRevokeGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RevokeGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RevokeGrantInput struct { + + // Identifies the grant to revoke. To get the grant ID, use CreateGrant, ListGrants, or ListRetirableGrants. + // + // This member is required. + GrantId *string + + // A unique identifier for the KMS key associated with the grant. To get the key + // ID and key ARN for a KMS key, use ListKeysor DescribeKey. + // + // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Checks if your request will succeed. DryRun is an optional parameter. 
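+	//
+	// As a rough sketch, a revocation check that does not actually revoke the grant
+	// might set DryRun like this, assuming client was built with kms.NewFromConfig,
+	// ctx is a context.Context, and the aws helpers come from
+	// github.com/aws/aws-sdk-go-v2/aws; the identifiers are placeholders:
+	//
+	//	if _, err := client.RevokeGrant(ctx, &kms.RevokeGrantInput{
+	//		KeyId:   aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+	//		GrantId: aws.String("0123456789012345678901234567890123456789012345678901234567890123"),
+	//		DryRun:  aws.Bool(true),
+	//	}); err != nil {
+	//		// Inspect err: with DryRun set, KMS only checks whether the request would succeed.
+	//	}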
+ // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + noSmithyDocumentSerde +} + +type RevokeGrantOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RevokeGrant"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpRevokeGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRevokeGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RevokeGrant", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go new file mode 100644 index 000000000..5424efcf6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RotateKeyOnDemand.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Immediately initiates rotation of the key material of the specified symmetric +// encryption KMS key. +// +// You can perform [on-demand rotation] of the key material in customer managed KMS keys, regardless +// of whether or not [automatic key rotation]is enabled. On-demand rotations do not change existing +// automatic rotation schedules. For example, consider a KMS key that has automatic +// key rotation enabled with a rotation period of 730 days. If the key is scheduled +// to automatically rotate on April 14, 2024, and you perform an on-demand rotation +// on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, +// 2024 and every 730 days thereafter. +// +// You can perform on-demand key rotation a maximum of 10 times per KMS key. You +// can use the KMS console to view the number of remaining on-demand rotations +// available for a KMS key. +// +// You can use GetKeyRotationStatus to identify any in progress on-demand rotations. You can use ListKeyRotations to +// identify the date that completed on-demand rotations were performed. You can +// monitor rotation of the key material for your KMS keys in CloudTrail and Amazon +// CloudWatch. +// +// On-demand key rotation is supported only on [symmetric encryption KMS keys]. You cannot perform on-demand +// rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To perform on-demand +// rotation of a set of related [multi-Region keys], invoke the on-demand rotation on the primary key. +// +// You cannot initiate on-demand rotation of [Amazon Web Services managed KMS keys]. KMS always rotates the key material +// of Amazon Web Services managed keys every year. Rotation of [Amazon Web Services owned KMS keys]is managed by the +// Amazon Web Services service that owns the key. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:RotateKeyOnDemand] (key policy) +// +// Related operations: +// +// # EnableKeyRotation +// +// # DisableKeyRotation +// +// # GetKeyRotationStatus +// +// # ListKeyRotations +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
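+//
+// For illustration, a minimal sketch that starts an on-demand rotation, assuming
+// ctx is a context.Context, cfg was loaded with config.LoadDefaultConfig(ctx)
+// (github.com/aws/aws-sdk-go-v2/config), and aws.String comes from
+// github.com/aws/aws-sdk-go-v2/aws; the key ID is a placeholder:
+//
+//	client := kms.NewFromConfig(cfg)
+//	out, err := client.RotateKeyOnDemand(ctx, &kms.RotateKeyOnDemandInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.KeyId identifies the symmetric encryption KMS key being rotated; use
+//	// GetKeyRotationStatus to check on the rotation afterwards.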
+// +// [on-demand rotation]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-on-demand +// [Amazon Web Services owned KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk +// [automatic key rotation]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable +// [kms:RotateKeyOnDemand]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +// [Amazon Web Services managed KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [symmetric encryption KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +func (c *Client) RotateKeyOnDemand(ctx context.Context, params *RotateKeyOnDemandInput, optFns ...func(*Options)) (*RotateKeyOnDemandOutput, error) { + if params == nil { + params = &RotateKeyOnDemandInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RotateKeyOnDemand", params, optFns, c.addOperationRotateKeyOnDemandMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RotateKeyOnDemandOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RotateKeyOnDemandInput struct { + + // Identifies a symmetric encryption KMS key. You cannot perform on-demand + // rotation of [asymmetric KMS keys], [HMAC KMS keys], KMS keys with [imported key material], or KMS keys in a [custom key store]. To perform on-demand + // rotation of a set of related [multi-Region keys], invoke the on-demand rotation on the primary key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // [imported key material]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + // [HMAC KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + // [asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + // [multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type RotateKeyOnDemandOutput struct { + + // Identifies the symmetric encryption KMS key that you initiated on-demand + // rotation on. + KeyId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRotateKeyOnDemandMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRotateKeyOnDemand{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRotateKeyOnDemand{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RotateKeyOnDemand"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpRotateKeyOnDemandValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRotateKeyOnDemand(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRotateKeyOnDemand(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RotateKeyOnDemand", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go new file mode 100644 index 000000000..090468d5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -0,0 +1,268 @@ +// Code generated by smithy-go-codegen DO NOT 
EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Schedules the deletion of a KMS key. By default, KMS applies a waiting period +// of 30 days, but you can specify a waiting period of 7-30 days. When this +// operation is successful, the key state of the KMS key changes to PendingDeletion +// and the key can't be used in any cryptographic operations. It remains in this +// state for the duration of the waiting period. Before the waiting period ends, +// you can use CancelKeyDeletionto cancel the deletion of the KMS key. After the waiting period +// ends, KMS deletes the KMS key, its key material, and all KMS data associated +// with it, including all aliases that refer to it. +// +// Deleting a KMS key is a destructive and potentially dangerous operation. When a +// KMS key is deleted, all data that was encrypted under the KMS key is +// unrecoverable. (The only exception is a [multi-Region replica key], or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS +// key without deleting it, use DisableKey. +// +// You can schedule the deletion of a multi-Region primary key and its replica +// keys at any time. However, KMS will not delete a multi-Region primary key with +// existing replica keys. If you schedule the deletion of a primary key with +// replicas, its key state changes to PendingReplicaDeletion and it cannot be +// replicated or used in cryptographic operations. This status can continue +// indefinitely. When the last of its replicas keys is deleted (not just +// scheduled), the key state of the primary key changes to PendingDeletion and its +// waiting period ( PendingWindowInDays ) begins. For details, see [Deleting multi-Region keys] in the Key +// Management Service Developer Guide. +// +// When KMS [deletes a KMS key from an CloudHSM key store], it makes a best effort to delete the associated key material from +// the associated CloudHSM cluster. However, you might need to manually [delete the orphaned key material]from the +// cluster and its backups. [Deleting a KMS key from an external key store]has no effect on the associated external key. However, +// for both types of custom key stores, deleting a KMS key is destructive and +// irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using +// only its associated external key or CloudHSM key. Also, you cannot recreate a +// KMS key in an external key store by creating a new KMS key with the same key +// material. +// +// For more information about scheduling a KMS key for deletion, see [Deleting KMS keys] in the Key +// Management Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: kms:ScheduleKeyDeletion (key policy) +// +// # Related operations +// +// # CancelKeyDeletion +// +// # DisableKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
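+//
+// A minimal sketch that schedules deletion with the shortest allowed waiting
+// period, assuming ctx is a context.Context, cfg was loaded with
+// config.LoadDefaultConfig(ctx) (github.com/aws/aws-sdk-go-v2/config), and the
+// aws helpers come from github.com/aws/aws-sdk-go-v2/aws; the key ID is a
+// placeholder:
+//
+//	client := kms.NewFromConfig(cfg)
+//	out, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
+//		KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		PendingWindowInDays: aws.Int32(7),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.DeletionDate reports when KMS will delete the key; CancelKeyDeletion can
+//	// still cancel the deletion before that date.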
+// +// [delete the orphaned key material]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Deleting a KMS key from an external key store]: https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html +// [Deleting multi-Region keys]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html +// [Deleting KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html +// [multi-Region replica key]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [deletes a KMS key from an CloudHSM key store]: https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html +func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) { + if params == nil { + params = &ScheduleKeyDeletionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ScheduleKeyDeletion", params, optFns, c.addOperationScheduleKeyDeletionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ScheduleKeyDeletionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ScheduleKeyDeletionInput struct { + + // The unique identifier of the KMS key to delete. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The waiting period, specified in number of days. After the waiting period ends, + // KMS deletes the KMS key. + // + // If the KMS key is a multi-Region primary key with replica keys, the waiting + // period begins when the last of its replica keys is deleted. Otherwise, the + // waiting period begins immediately. + // + // This value is optional. If you include a value, it must be between 7 and 30, + // inclusive. If you do not include a value, it defaults to 30. You can use the [kms:ScheduleKeyDeletionPendingWindowInDays] + // kms:ScheduleKeyDeletionPendingWindowInDays condition key to further constrain + // the values that principals can specify in the PendingWindowInDays parameter. + // + // [kms:ScheduleKeyDeletionPendingWindowInDays]: https://docs.aws.amazon.com/kms/latest/developerguide/conditions-kms.html#conditions-kms-schedule-key-deletion-pending-window-in-days + PendingWindowInDays *int32 + + noSmithyDocumentSerde +} + +type ScheduleKeyDeletionOutput struct { + + // The date and time after which KMS deletes the KMS key. + // + // If the KMS key is a multi-Region primary key with replica keys, this field does + // not appear. The deletion date for the primary key isn't known until its last + // replica key is deleted. + DeletionDate *time.Time + + // The Amazon Resource Name ([key ARN] ) of the KMS key whose deletion is scheduled. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The current status of the KMS key. 
+ // + // For more information about how key state affects the use of a KMS key, see [Key states of KMS keys] in + // the Key Management Service Developer Guide. + // + // [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + KeyState types.KeyState + + // The waiting period before the KMS key is deleted. + // + // If the KMS key is a multi-Region primary key with replicas, the waiting period + // begins when the last of its replica keys is deleted. Otherwise, the waiting + // period begins immediately. + PendingWindowInDays *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ScheduleKeyDeletion"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpScheduleKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScheduleKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + 
return nil +} + +func newServiceMetadataMiddleware_opScheduleKeyDeletion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ScheduleKeyDeletion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go new file mode 100644 index 000000000..553404e23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go @@ -0,0 +1,325 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a [digital signature] for a message or message digest by using the private key in an +// asymmetric signing KMS key. To verify the signature, use the Verifyoperation, or use +// the public key in the same asymmetric KMS key outside of KMS. For information +// about asymmetric KMS keys, see [Asymmetric KMS keys]in the Key Management Service Developer Guide. +// +// Digital signatures are generated and verified by using asymmetric key pair, +// such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key +// owner (or an authorized user) uses their private key to sign a message. Anyone +// with the public key can verify that the message was signed with that particular +// private key and that the message hasn't changed since it was signed. +// +// To use the Sign operation, provide the following information: +// +// - Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage +// value of SIGN_VERIFY . To get the KeyUsage value of a KMS key, use the DescribeKey +// operation. The caller must have kms:Sign permission on the KMS key. +// +// - Use the Message parameter to specify the message or message digest to sign. +// You can submit messages of up to 4096 bytes. To sign a larger message, generate +// a hash digest of the message, and then provide the hash digest in the Message +// parameter. To indicate whether the message is a full message or a digest, use +// the MessageType parameter. +// +// - Choose a signing algorithm that is compatible with the KMS key. +// +// When signing a message, be sure to record the KMS key and the signing +// algorithm. This information is required to verify the signature. +// +// Best practices recommend that you limit the time during which any signature is +// effective. This deters an attack where the actor uses a signed message to +// establish validity repeatedly or long after the message is superseded. +// Signatures do not include a timestamp, but you can include a timestamp in the +// signed message to help you detect when its time to refresh the signature. +// +// To verify the signature that this operation generates, use the Verify operation. Or +// use the GetPublicKeyoperation to download the public key and then use the public key to +// verify the signature outside of KMS. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. 
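A hedged sketch of how a caller might exercise both MessageType paths described above, assuming an already-constructed *kms.Client and an RSA signing key (so RSASSA_PSS_SHA_256 is a valid algorithm choice); names and values are illustrative and not part of the vendored file:

```go
package kmsexample

import (
	"context"
	"crypto/sha256"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signRaw sends the full message and lets KMS hash it (MessageType RAW).
func signRaw(ctx context.Context, client *kms.Client, keyID string, msg []byte) ([]byte, error) {
	out, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String(keyID),
		Message:          msg, // up to 4096 bytes when sent as a full message
		MessageType:      types.MessageTypeRaw,
		SigningAlgorithm: types.SigningAlgorithmSpecRsassaPssSha256,
	})
	if err != nil {
		return nil, err
	}
	return out.Signature, nil
}

// signDigest pre-hashes a larger message locally and sends only the digest,
// telling KMS to skip its own hashing step (MessageType DIGEST).
func signDigest(ctx context.Context, client *kms.Client, keyID string, msg []byte) ([]byte, error) {
	digest := sha256.Sum256(msg) // SHA-256 to match an *_SHA_256 signing algorithm
	out, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String(keyID),
		Message:          digest[:],
		MessageType:      types.MessageTypeDigest,
		SigningAlgorithm: types.SigningAlgorithmSpecRsassaPssSha256,
	})
	if err != nil {
		return nil, err
	}
	return out.Signature, nil
}
```

As the doc comment notes, the caller should record the key and signing algorithm alongside the returned Signature so the later Verify (or offline verification) uses the same parameters.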
+// +// Required permissions: [kms:Sign] (key policy) +// +// Related operations: Verify +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [digital signature]: https://en.wikipedia.org/wiki/Digital_signature +// [Asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [kms:Sign]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { + if params == nil { + params = &SignInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Sign", params, optFns, c.addOperationSignMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SignOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SignInput struct { + + // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric + // KMS key to sign the message. The KeyUsage type of the KMS key must be + // SIGN_VERIFY . To find the KeyUsage of a KMS key, use the DescribeKey operation. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. + // To sign a larger message, provide a message digest. + // + // If you provide a message digest, use the DIGEST value of MessageType to prevent + // the digest from being hashed again while signing. + // + // This member is required. + Message []byte + + // Specifies the signing algorithm to use when signing the message. + // + // Choose an algorithm that is compatible with the type and size of the specified + // asymmetric KMS key. When signing with RSA key pairs, RSASSA-PSS algorithms are + // preferred. We include RSASSA-PKCS1-v1_5 algorithms for compatibility with + // existing applications. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. 
For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // Tells KMS whether the value of the Message parameter should be hashed as part + // of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message + // digests, which are already hashed. + // + // When the value of MessageType is RAW , KMS uses the standard signing algorithm, + // which begins with a hash function. When the value is DIGEST , KMS skips the + // hashing step in the signing algorithm. + // + // Use the DIGEST value only when the value of the Message parameter is a message + // digest. If you use the DIGEST value with an unhashed message, the security of + // the signing operation can be compromised. + // + // When the value of MessageType is DIGEST , the length of the Message value must + // match the length of hashed messages for the specified signing algorithm. + // + // You can submit a message digest and omit the MessageType or specify RAW so the + // digest is hashed again while signing. However, this can cause verification + // failures when verifying with a system that assumes a single hash. + // + // The hashing algorithm in that Sign uses is based on the SigningAlgorithm value. + // + // - Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm. + // + // - Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm. + // + // - Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm. + // + // - SM2DSA uses the SM3 hashing algorithm. For details, see [Offline verification with SM2 key pairs]. + // + // [Offline verification with SM2 key pairs]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification + MessageType types.MessageType + + noSmithyDocumentSerde +} + +type SignOutput struct { + + // The Amazon Resource Name ([key ARN] ) of the asymmetric KMS key that was used to sign the + // message. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // The cryptographic signature that was generated for the message. + // + // - When used with the supported RSA signing algorithms, the encoding of this + // value is defined by [PKCS #1 in RFC 8017]. + // + // - When used with the ECDSA_SHA_256 , ECDSA_SHA_384 , or ECDSA_SHA_512 signing + // algorithms, this value is a DER-encoded object as defined by ANSI X9.62–2005 and + // [RFC 3279 Section 2.2.3]. This is the most commonly used signature format and is appropriate for most + // uses. + // + // When you use the HTTP API or the Amazon Web Services CLI, the value is + // Base64-encoded. Otherwise, it is not Base64-encoded. + // + // [RFC 3279 Section 2.2.3]: https://tools.ietf.org/html/rfc3279#section-2.2.3 + // [PKCS #1 in RFC 8017]: https://tools.ietf.org/html/rfc8017 + Signature []byte + + // The signing algorithm that was used to sign the message. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSign{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSign{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Sign"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpSignValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSign(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSign(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Sign", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go new file mode 100644 index 000000000..6ec3ee335 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -0,0 +1,226 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds or edits tags on a [customer managed key]. +// +// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For +// details, see [ABAC for KMS]in the Key Management Service Developer Guide. +// +// Each tag consists of a tag key and a tag value, both of which are +// case-sensitive strings. The tag value can be an empty (null) string. To add a +// tag, specify a new tag key and a tag value. To edit a tag, specify an existing +// tag key and a new tag value. +// +// You can use this operation to tag a [customer managed key], but you cannot tag an [Amazon Web Services managed key], an [Amazon Web Services owned key], a [custom key store], or an [alias]. +// +// You can also add tags to a KMS key while creating it (CreateKey ) or replicating it (ReplicateKey ). +// +// For information about using tags in KMS, see [Tagging keys]. For general information about +// tags, including the format and syntax, see [Tagging Amazon Web Services resources]in the Amazon Web Services General +// Reference. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:TagResource] (key policy) +// +// # Related operations +// +// # CreateKey +// +// # ListResourceTags +// +// # ReplicateKey +// +// # UntagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
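An illustrative sketch (not part of the vendored file) of tagging and untagging a customer managed key, assuming an already-configured *kms.Client; the companion UntagResource call and its TagKeys field are documented later in this same patch:

```go
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// tagKey adds (or overwrites) a single tag on a customer managed key.
func tagKey(ctx context.Context, client *kms.Client, keyID, k, v string) error {
	_, err := client.TagResource(ctx, &kms.TagResourceInput{
		KeyId: aws.String(keyID),
		Tags: []types.Tag{
			{TagKey: aws.String(k), TagValue: aws.String(v)}, // value may be an empty string
		},
	})
	return err
}

// untagKey removes tags by key; a tag key that is not present is not an error.
func untagKey(ctx context.Context, client *kms.Client, keyID string, tagKeys ...string) error {
	_, err := client.UntagResource(ctx, &kms.UntagResourceInput{
		KeyId:   aws.String(keyID),
		TagKeys: tagKeys,
	})
	return err
}
```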
+// +// [Amazon Web Services owned key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:TagResource]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Tagging keys]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html +// [alias]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept +// [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept +// [Tagging Amazon Web Services resources]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // Identifies a customer managed key in the account and Region. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tags. Each tag consists of a tag key and a tag value. The tag value + // can be an empty (null) string. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // You cannot have more than one tag on a KMS key with the same tag key. If you + // specify an existing tag key with a different tag value, KMS replaces the current + // tag value with the specified one. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go new file mode 100644 index 000000000..ee01e6e7c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes tags from a [customer managed key]. To delete a tag, specify the tag key and the KMS key. +// +// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For +// details, see [ABAC for KMS]in the Key Management Service Developer Guide. +// +// When it succeeds, the UntagResource operation doesn't return any output. Also, +// if the specified tag key isn't found on the KMS key, it doesn't throw an +// exception or return a response. To confirm that the operation worked, use the ListResourceTags +// operation. +// +// For information about using tags in KMS, see [Tagging keys]. For general information about +// tags, including the format and syntax, see [Tagging Amazon Web Services resources]in the Amazon Web Services General +// Reference. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:UntagResource] (key policy) +// +// # Related operations +// +// # CreateKey +// +// # ListResourceTags +// +// # ReplicateKey +// +// # TagResource +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:UntagResource]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Tagging keys]: https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html +// [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Tagging Amazon Web Services resources]: https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // Identifies the KMS key from which you are removing tags. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tag keys. Specify only the tag keys, not the tag values. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go new file mode 100644 index 000000000..2238087b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -0,0 +1,240 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Associates an existing KMS alias with a different KMS key. Each alias is +// associated with only one KMS key at a time, although a KMS key can have multiple +// aliases. The alias and the KMS key must be in the same Amazon Web Services +// account and Region. +// +// Adding, deleting, or updating an alias can allow or deny permission to the KMS +// key. For details, see [ABAC for KMS]in the Key Management Service Developer Guide. +// +// The current and new KMS key must be the same type (both symmetric or both +// asymmetric or both HMAC), and they must have the same key usage. This +// restriction prevents errors in code that uses aliases. If you must assign an +// alias to a different type of KMS key, use DeleteAliasto delete the old alias and CreateAlias to +// create a new alias. +// +// You cannot use UpdateAlias to change an alias name. To change an alias name, +// use DeleteAliasto delete the old alias and CreateAlias to create a new alias. +// +// Because an alias is not a property of a KMS key, you can create, update, and +// delete the aliases of a KMS key without affecting the KMS key. Also, aliases do +// not appear in the response from the DescribeKeyoperation. To get the aliases of all KMS +// keys in the account, use the ListAliasesoperation. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// # Required permissions +// +// [kms:UpdateAlias] +// - on the alias (IAM policy). +// +// [kms:UpdateAlias] +// - on the current KMS key (key policy). +// +// [kms:UpdateAlias] +// - on the new KMS key (key policy). +// +// For details, see [Controlling access to aliases] in the Key Management Service Developer Guide. +// +// Related operations: +// +// # CreateAlias +// +// # DeleteAlias +// +// # ListAliases +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [ABAC for KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/abac.html +// [kms:UpdateAlias]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [Controlling access to aliases]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access +func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { + if params == nil { + params = &UpdateAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateAlias", params, optFns, c.addOperationUpdateAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateAliasInput struct { + + // Identifies the alias that is changing its KMS key. This value must begin with + // alias/ followed by the alias name, such as alias/ExampleAlias . 
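An editorial sketch of repointing an alias, assuming a configured *kms.Client; the TargetKeyId field is documented just below, and all identifiers are placeholders:

```go
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// repointAlias moves an existing alias so it resolves to a different KMS key.
// Both keys must have the same type and key usage; the alias name itself
// cannot be changed by this operation.
func repointAlias(ctx context.Context, client *kms.Client, aliasName, targetKeyID string) error {
	_, err := client.UpdateAlias(ctx, &kms.UpdateAliasInput{
		AliasName:   aws.String(aliasName), // must begin with "alias/"
		TargetKeyId: aws.String(targetKeyID),
	})
	return err
}
```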
You cannot use + // UpdateAlias to change the alias name. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // This member is required. + AliasName *string + + // Identifies the [customer managed key] to associate with the alias. You don't have permission to + // associate an alias with an [Amazon Web Services managed key]. + // + // The KMS key must be in the same Amazon Web Services account and Region as the + // alias. Also, the new target KMS key must be the same type as the current target + // KMS key (both symmetric or both asymmetric or both HMAC) and they must have the + // same key usage. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // To verify that the alias is mapped to the correct KMS key, use ListAliases. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type UpdateAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateAlias"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateAliasValidationMiddleware(stack); 
err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go new file mode 100644 index 000000000..fa7c84dfd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -0,0 +1,350 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the properties of a custom key store. You can use this operation to +// change the properties of an CloudHSM key store or an external key store. +// +// Use the required CustomKeyStoreId parameter to identify the custom key store. +// Use the remaining optional parameters to change its properties. This operation +// does not return any property values. To verify the updated property values, use +// the DescribeCustomKeyStoresoperation. +// +// This operation is part of the [custom key stores] feature in KMS, which combines the convenience +// and extensive integration of KMS with the isolation and control of a key store +// that you own and manage. +// +// When updating the properties of an external key store, verify that the updated +// settings connect your key store, via the external key store proxy, to the same +// external key manager as the previous settings, or to a backup or snapshot of the +// external key manager with the same cryptographic keys. If the updated connection +// settings fail, you can fix them and retry, although an extended delay might +// disrupt Amazon Web Services services. However, if KMS permanently loses its +// access to cryptographic keys, ciphertext encrypted under those keys is +// unrecoverable. +// +// For external key stores: +// +// Some external key managers provide a simpler method for updating an external +// key store. For details, see your external key manager documentation. +// +// When updating an external key store in the KMS console, you can upload a +// JSON-based proxy configuration file with the desired values. You cannot upload +// the proxy configuration file to the UpdateCustomKeyStore operation. 
However, +// you can use the file to help you determine the correct values for the +// UpdateCustomKeyStore parameters. +// +// For an CloudHSM key store, you can use this operation to change the custom key +// store friendly name ( NewCustomKeyStoreName ), to tell KMS about a change to the +// kmsuser crypto user password ( KeyStorePassword ), or to associate the custom +// key store with a different, but related, CloudHSM cluster ( CloudHsmClusterId ). +// To update any property of an CloudHSM key store, the ConnectionState of the +// CloudHSM key store must be DISCONNECTED . +// +// For an external key store, you can use this operation to change the custom key +// store friendly name ( NewCustomKeyStoreName ), or to tell KMS about a change to +// the external key store proxy authentication credentials ( +// XksProxyAuthenticationCredential ), connection method ( XksProxyConnectivity ), +// external proxy endpoint ( XksProxyUriEndpoint ) and path ( XksProxyUriPath ). +// For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE , +// you can also update the Amazon VPC endpoint service name ( +// XksProxyVpcEndpointServiceName ). To update most properties of an external key +// store, the ConnectionState of the external key store must be DISCONNECTED . +// However, you can update the CustomKeyStoreName , +// XksProxyAuthenticationCredential , and XksProxyUriPath of an external key store +// when it is in the CONNECTED or DISCONNECTED state. +// +// If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore +// , use the DisconnectCustomKeyStoreoperation to disconnect the custom key store. After the +// UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to reconnect the custom key +// store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation. +// +// Before updating the custom key store, verify that the new values allow KMS to +// connect the custom key store to its backing key store. For example, before you +// change the XksProxyUriPath value, verify that the external key store proxy is +// reachable at the new path. +// +// If the operation succeeds, it returns a JSON object with no properties. +// +// Cross-account use: No. You cannot perform this operation on a custom key store +// in a different Amazon Web Services account. +// +// Required permissions: [kms:UpdateCustomKeyStore] (IAM policy) +// +// Related operations: +// +// # ConnectCustomKeyStore +// +// # CreateCustomKeyStore +// +// # DeleteCustomKeyStore +// +// # DescribeCustomKeyStores +// +// # DisconnectCustomKeyStore +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
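A hedged example of one of the simpler updates described above, renaming a custom key store, with an assumed *kms.Client and placeholder identifiers (an editorial aside, not part of the vendored file):

```go
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// renameKeyStore changes only the friendly name of a custom key store.
// A CloudHSM key store must be DISCONNECTED first; an external key store
// may be connected or disconnected for this particular property.
func renameKeyStore(ctx context.Context, client *kms.Client, storeID, newName string) error {
	_, err := client.UpdateCustomKeyStore(ctx, &kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId:      aws.String(storeID),
		NewCustomKeyStoreName: aws.String(newName),
	})
	return err
}
```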
+// +// [custom key stores]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html +// [kms:UpdateCustomKeyStore]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) { + if params == nil { + params = &UpdateCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCustomKeyStore", params, optFns, c.addOperationUpdateCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateCustomKeyStoreInput struct { + + // Identifies the custom key store that you want to update. Enter the ID of the + // custom key store. To find the ID of a custom key store, use the DescribeCustomKeyStoresoperation. + // + // This member is required. + CustomKeyStoreId *string + + // Associates the custom key store with a related CloudHSM cluster. This parameter + // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . + // + // Enter the cluster ID of the cluster that you used to create the custom key + // store or a cluster that shares a backup history and has the same cluster + // certificate as the original cluster. You cannot use this parameter to associate + // a custom key store with an unrelated cluster. In addition, the replacement + // cluster must [fulfill the requirements]for a cluster associated with a custom key store. To view the + // cluster certificate of a cluster, use the [DescribeClusters]operation. + // + // To change this value, the CloudHSM key store must be disconnected. + // + // [fulfill the requirements]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore + // [DescribeClusters]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html + CloudHsmClusterId *string + + // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM + // cluster that is associated with the custom key store. This parameter is valid + // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM . + // + // This parameter tells KMS the current password of the kmsuser crypto user (CU). + // It does not set or change the password of any users in the CloudHSM cluster. + // + // To change this value, the CloudHSM key store must be disconnected. + KeyStorePassword *string + + // Changes the friendly name of the custom key store to the value that you + // specify. The custom key store name must be unique in the Amazon Web Services + // account. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // To change this value, an CloudHSM key store must be disconnected. An external + // key store can be connected or disconnected. + NewCustomKeyStoreName *string + + // Changes the credentials that KMS uses to sign requests to the external key + // store proxy (XKS proxy). This parameter is valid only for custom key stores with + // a CustomKeyStoreType of EXTERNAL_KEY_STORE . 
+ // + // You must specify both the AccessKeyId and SecretAccessKey value in the + // authentication credential, even if you are only updating one value. + // + // This parameter doesn't establish or change your authentication credentials on + // the proxy. It just tells KMS the credential that you established with your + // external key store proxy. For example, if you rotate the credential on your + // external key store proxy, you can use this parameter to update the credential in + // KMS. + // + // You can change this value when the external key store is connected or + // disconnected. + XksProxyAuthenticationCredential *types.XksProxyAuthenticationCredentialType + + // Changes the connectivity setting for the external key store. To indicate that + // the external key store proxy uses a Amazon VPC endpoint service to communicate + // with KMS, specify VPC_ENDPOINT_SERVICE . Otherwise, specify PUBLIC_ENDPOINT . + // + // If you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE , you must also + // change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName value. + // + // If you change the XksProxyConnectivity to PUBLIC_ENDPOINT , you must also change + // the XksProxyUriEndpoint and specify a null or empty string for the + // XksProxyVpcEndpointServiceName value. + // + // To change this value, the external key store must be disconnected. + XksProxyConnectivity types.XksProxyConnectivityType + + // Changes the URI endpoint that KMS uses to connect to your external key store + // proxy (XKS proxy). This parameter is valid only for custom key stores with a + // CustomKeyStoreType of EXTERNAL_KEY_STORE . + // + // For external key stores with an XksProxyConnectivity value of PUBLIC_ENDPOINT , + // the protocol must be HTTPS. + // + // For external key stores with an XksProxyConnectivity value of + // VPC_ENDPOINT_SERVICE , specify https:// followed by the private DNS name + // associated with the VPC endpoint service. Each external key store must use a + // different private DNS name. + // + // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in + // the Amazon Web Services account and Region. + // + // To change this value, the external key store must be disconnected. + XksProxyUriEndpoint *string + + // Changes the base path to the proxy APIs for this external key store. To find + // this value, see the documentation for your external key manager and external key + // store proxy (XKS proxy). This parameter is valid only for custom key stores with + // a CustomKeyStoreType of EXTERNAL_KEY_STORE . + // + // The value must start with / and must end with /kms/xks/v1 , where v1 represents + // the version of the KMS external key store proxy API. You can include an optional + // prefix between the required elements such as /example/kms/xks/v1 . + // + // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique in + // the Amazon Web Services account and Region. + // + // You can change this value when the external key store is connected or + // disconnected. + XksProxyUriPath *string + + // Changes the name that KMS uses to identify the Amazon VPC endpoint service for + // your external key store proxy (XKS proxy). This parameter is valid when the + // CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity is + // VPC_ENDPOINT_SERVICE . + // + // To change this value, the external key store must be disconnected. 
+ XksProxyVpcEndpointServiceName *string + + noSmithyDocumentSerde +} + +type UpdateCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateCustomKeyStore"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go new file mode 100644 
index 000000000..83ed78459 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the description of a KMS key. To see the description of a KMS key, use DescribeKey +// . +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. +// +// Required permissions: [kms:UpdateKeyDescription] (key policy) +// +// # Related operations +// +// # CreateKey +// +// # DescribeKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [kms:UpdateKeyDescription]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) { + if params == nil { + params = &UpdateKeyDescriptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateKeyDescription", params, optFns, c.addOperationUpdateKeyDescriptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateKeyDescriptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateKeyDescriptionInput struct { + + // New description for the KMS key. + // + // Do not include confidential or sensitive information in this field. This field + // may be displayed in plaintext in CloudTrail logs and other output. + // + // This member is required. + Description *string + + // Updates the description of the specified KMS key. + // + // Specify the key ID or key ARN of the KMS key. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type UpdateKeyDescriptionOutput struct { + // Metadata pertaining to the operation's result. 
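An illustrative wrapper around this operation, again assuming a configured *kms.Client; the helper name and identifiers are placeholders and not part of the vendored file:

```go
package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// setKeyDescription replaces the free-text description of a KMS key.
// Avoid putting confidential data here; the value can appear in CloudTrail logs.
func setKeyDescription(ctx context.Context, client *kms.Client, keyID, description string) error {
	_, err := client.UpdateKeyDescription(ctx, &kms.UpdateKeyDescriptionInput{
		KeyId:       aws.String(keyID),
		Description: aws.String(description),
	})
	return err
}
```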
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateKeyDescription"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateKeyDescriptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKeyDescription(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateKeyDescription(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateKeyDescription", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go new file mode 100644 index 000000000..e79f816ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -0,0 +1,248 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the primary key of a multi-Region key. +// +// This operation changes the replica key in the specified Region to a primary key +// and changes the former primary key to a replica key. For example, suppose you +// have a primary key in us-east-1 and a replica key in eu-west-2 . If you run +// UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2 , the primary key is +// now the key in eu-west-2 , and the key in us-east-1 becomes a replica key. For +// details, see [Updating the primary Region]in the Key Management Service Developer Guide. +// +// This operation supports multi-Region keys, an KMS feature that lets you create +// multiple interoperable KMS keys in different Amazon Web Services Regions. +// Because these KMS keys have the same key ID, key material, and other metadata, +// you can use them interchangeably to encrypt data in one Amazon Web Services +// Region and decrypt it in a different Amazon Web Services Region without +// re-encrypting the data or making a cross-Region call. For more information about +// multi-Region keys, see [Multi-Region keys in KMS]in the Key Management Service Developer Guide. +// +// The primary key of a multi-Region key is the source for properties that are +// always shared by primary and replica keys, including the key material, [key ID], [key spec], [key usage], [key material origin], +// and [automatic key rotation]. It's the only key that can be replicated. You cannot [delete the primary key] until all replica +// keys are deleted. +// +// The key ID and primary Region that you specify uniquely identify the replica +// key that will become the primary key. The primary Region must already have a +// replica key. This operation does not create a KMS key in the specified Region. +// To find the replica keys, use the DescribeKeyoperation on the primary key or any replica +// key. To create a replica key, use the ReplicateKeyoperation. +// +// You can run this operation while using the affected multi-Region keys in +// cryptographic operations. This operation should not delay, interrupt, or cause +// failures in cryptographic operations. +// +// Even after this operation completes, the process of updating the primary Region +// might still be in progress for a few more seconds. Operations such as +// DescribeKey might display both the old and new primary keys as replicas. The old +// and new primary keys have a transient key state of Updating . The original key +// state is restored when the update is complete. While the key state is Updating , +// you can use the keys in cryptographic operations, but you cannot replicate the +// new primary key or perform certain management operations, such as enabling or +// disabling these keys. For details about the Updating key state, see [Key states of KMS keys] in the Key +// Management Service Developer Guide. +// +// This operation does not return any output. To verify that primary key is +// changed, use the DescribeKeyoperation. +// +// Cross-account use: No. You cannot use this operation in a different Amazon Web +// Services account. +// +// Required permissions: +// +// - kms:UpdatePrimaryRegion on the current primary key (in the primary key's +// Region). Include this permission primary key's key policy. 
+// +// - kms:UpdatePrimaryRegion on the current replica key (in the replica key's +// Region). Include this permission in the replica key's key policy. +// +// # Related operations +// +// # CreateKey +// +// # ReplicateKey +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [key ID]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [delete the primary key]: https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html +// [key usage]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage +// [Updating the primary Region]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update +// [Multi-Region keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html +// [key spec]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec +// [key material origin]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin +// [automatic key rotation]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) { + if params == nil { + params = &UpdatePrimaryRegionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdatePrimaryRegion", params, optFns, c.addOperationUpdatePrimaryRegionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdatePrimaryRegionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdatePrimaryRegionInput struct { + + // Identifies the current primary key. When the operation completes, this KMS key + // will be a replica key. + // + // Specify the key ID or key ARN of a multi-Region primary key. + // + // For example: + // + // - Key ID: mrk-1234abcd12ab34cd56ef1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The Amazon Web Services Region of the new primary key. Enter the Region ID, + // such as us-east-1 or ap-southeast-2 . There must be an existing replica key in + // this Region. + // + // When the operation completes, the multi-Region key in this Region will be the + // primary key. + // + // This member is required. + PrimaryRegion *string + + noSmithyDocumentSerde +} + +type UpdatePrimaryRegionOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdatePrimaryRegion"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdatePrimaryRegionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePrimaryRegion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdatePrimaryRegion(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdatePrimaryRegion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go new file mode 100644 index 000000000..4a25ff29f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go @@ -0,0 +1,313 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
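Before the Verify operation below, here is a minimal usage sketch of the two update operations defined above, assuming a configured *kms.Client; the helper name is hypothetical and the key IDs are the examples from the doc comments:

```
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// updateKeyMetadata is a hypothetical helper that exercises both operations.
func updateKeyMetadata(ctx context.Context, client *kms.Client) error {
	// UpdateKeyDescription requires KeyId and Description.
	if _, err := client.UpdateKeyDescription(ctx, &kms.UpdateKeyDescriptionInput{
		KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Description: aws.String("example description"), // avoid sensitive data here
	}); err != nil {
		return err
	}

	// Promote the existing replica in eu-west-2 to primary; the former primary
	// becomes a replica (see the UpdatePrimaryRegion doc comment above).
	_, err := client.UpdatePrimaryRegion(ctx, &kms.UpdatePrimaryRegionInput{
		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"),
		PrimaryRegion: aws.String("eu-west-2"),
	})
	return err
}
```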
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Verifies a digital signature that was generated by the Sign operation. +// +// Verification confirms that an authorized user signed the message with the +// specified KMS key and signing algorithm, and the message hasn't changed since it +// was signed. If the signature is verified, the value of the SignatureValid field +// in the response is True . If the signature verification fails, the Verify +// operation fails with an KMSInvalidSignatureException exception. +// +// A digital signature is generated by using the private key in an asymmetric KMS +// key. The signature is verified by using the public key in the same asymmetric +// KMS key. For information about asymmetric KMS keys, see [Asymmetric KMS keys]in the Key Management +// Service Developer Guide. +// +// To use the Verify operation, specify the same asymmetric KMS key, message, and +// signing algorithm that were used to produce the signature. The message type does +// not need to be the same as the one used for signing, but it must indicate +// whether the value of the Message parameter should be hashed as part of the +// verification process. +// +// You can also verify the digital signature by using the public key of the KMS +// key outside of KMS. Use the GetPublicKeyoperation to download the public key in the +// asymmetric KMS key and then use the public key to verify the signature outside +// of KMS. The advantage of using the Verify operation is that it is performed +// within KMS. As a result, it's easy to call, the operation is performed within +// the FIPS boundary, it is logged in CloudTrail, and you can use key policy and +// IAM policy to determine who is authorized to use the KMS key to verify +// signatures. +// +// To verify a signature outside of KMS with an SM2 public key (China Regions +// only), you must specify the distinguishing ID. By default, KMS uses +// 1234567812345678 as the distinguishing ID. For more information, see [Offline verification with SM2 key pairs]. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:Verify] (key policy) +// +// Related operations: Sign +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. 
+// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [Asymmetric KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html +// [Offline verification with SM2 key pairs]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification +// [kms:Verify]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) { + if params == nil { + params = &VerifyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Verify", params, optFns, c.addOperationVerifyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyInput struct { + + // Identifies the asymmetric KMS key that will be used to verify the signature. + // This must be the same KMS key that was used to generate the signature. If you + // specify a different KMS key, the signature verification fails. + // + // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When + // using an alias name, prefix it with "alias/" . To specify a KMS key in a + // different Amazon Web Services account, you must use the key ARN or alias ARN. + // + // For example: + // + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // - Alias name: alias/ExampleAlias + // + // - Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name + // and alias ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message that was signed. You can submit a raw message of up to + // 4096 bytes, or a hash digest of the message. If you submit a digest, use the + // MessageType parameter with a value of DIGEST . + // + // If the message specified here is different from the message that was signed, + // the signature verification fails. A message and its hash digest are considered + // to be the same message. + // + // This member is required. + Message []byte + + // The signature that the Sign operation generated. + // + // This member is required. + Signature []byte + + // The signing algorithm that was used to sign the message. If you submit a + // different algorithm, the signature verification fails. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. 
+ // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + // Tells KMS whether the value of the Message parameter should be hashed as part + // of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message + // digests, which are already hashed. + // + // When the value of MessageType is RAW , KMS uses the standard signing algorithm, + // which begins with a hash function. When the value is DIGEST , KMS skips the + // hashing step in the signing algorithm. + // + // Use the DIGEST value only when the value of the Message parameter is a message + // digest. If you use the DIGEST value with an unhashed message, the security of + // the verification operation can be compromised. + // + // When the value of MessageType is DIGEST , the length of the Message value must + // match the length of hashed messages for the specified signing algorithm. + // + // You can submit a message digest and omit the MessageType or specify RAW so the + // digest is hashed again while signing. However, if the signed message is hashed + // once while signing, but twice while verifying, verification fails, even when the + // message hasn't changed. + // + // The hashing algorithm in that Verify uses is based on the SigningAlgorithm + // value. + // + // - Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm. + // + // - Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm. + // + // - Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm. + // + // - SM2DSA uses the SM3 hashing algorithm. For details, see [Offline verification with SM2 key pairs]. + // + // [Offline verification with SM2 key pairs]: https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification + MessageType types.MessageType + + noSmithyDocumentSerde +} + +type VerifyOutput struct { + + // The Amazon Resource Name ([key ARN] ) of the asymmetric KMS key that was used to verify + // the signature. + // + // [key ARN]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN + KeyId *string + + // A Boolean value that indicates whether the signature was verified. A value of + // True indicates that the Signature was produced by signing the Message with the + // specified KeyID and SigningAlgorithm. If the signature is not verified, the + // Verify operation fails with a KMSInvalidSignatureException exception. + SignatureValid bool + + // The signing algorithm that was used to verify the signature. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerify{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerify{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Verify"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpVerifyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerify(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opVerify(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Verify", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go new file mode 100644 index 000000000..484906a5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -0,0 +1,246 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
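A minimal sketch of calling Verify against a signature produced by Sign, assuming a configured *kms.Client; the alias name and the ECDSA_SHA_256 signing algorithm are assumptions and must match whatever was used when signing:

```
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// verifySignature checks a raw message against a signature produced by Sign.
func verifySignature(ctx context.Context, client *kms.Client, message, signature []byte) (bool, error) {
	out, err := client.Verify(ctx, &kms.VerifyInput{
		KeyId:            aws.String("alias/example-signing-key"),
		Message:          message,
		MessageType:      types.MessageTypeRaw, // KMS hashes the raw message itself
		Signature:        signature,
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
	})
	if err != nil {
		// An invalid signature surfaces as a KMSInvalidSignatureException error,
		// not as SignatureValid == false.
		return false, err
	}
	return out.SignatureValid, nil
}
```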
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Verifies the hash-based message authentication code (HMAC) for a specified +// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac +// computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you +// specify, and compares the computed HMAC to the HMAC that you specify. If the +// HMACs are identical, the verification succeeds; otherwise, it fails. +// Verification indicates that the message hasn't changed since the HMAC was +// calculated, and the specified key was used to generate and verify the HMAC. +// +// HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry +// standards defined in [RFC 2104]. +// +// This operation is part of KMS support for HMAC KMS keys. For details, see [HMAC keys in KMS] in +// the Key Management Service Developer Guide. +// +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see [Key states of KMS keys]in the Key Management Service Developer Guide. +// +// Cross-account use: Yes. To perform this operation with a KMS key in a different +// Amazon Web Services account, specify the key ARN or alias ARN in the value of +// the KeyId parameter. +// +// Required permissions: [kms:VerifyMac] (key policy) +// +// Related operations: GenerateMac +// +// Eventual consistency: The KMS API follows an eventual consistency model. For +// more information, see [KMS eventual consistency]. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +// [RFC 2104]: https://datatracker.ietf.org/doc/html/rfc2104 +// [kms:VerifyMac]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html +// [KMS eventual consistency]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html +// [HMAC keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html +func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) { + if params == nil { + params = &VerifyMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "VerifyMac", params, optFns, c.addOperationVerifyMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyMacInput struct { + + // The KMS key that will be used in the verification. + // + // Enter a key ID of the KMS key that was used to generate the HMAC. If you + // identify a different KMS key, the VerifyMac operation fails. + // + // This member is required. + KeyId *string + + // The HMAC to verify. Enter the HMAC that was generated by the GenerateMac operation when + // you specified the same message, HMAC KMS key, and MAC algorithm as the values + // specified in this request. + // + // This member is required. + Mac []byte + + // The MAC algorithm that will be used in the verification. Enter the same MAC + // algorithm that was used to compute the HMAC. This algorithm must be supported by + // the HMAC KMS key identified by the KeyId parameter. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message that will be used in the verification. 
Enter the same message that + // was used to generate the HMAC. + // + // GenerateMacand VerifyMac do not provide special handling for message digests. If you + // generated an HMAC for a hash digest of a message, you must verify the HMAC for + // the same hash digest. + // + // This member is required. + Message []byte + + // Checks if your request will succeed. DryRun is an optional parameter. + // + // To learn more about how to use this parameter, see [Testing your KMS API calls] in the Key Management + // Service Developer Guide. + // + // [Testing your KMS API calls]: https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html + DryRun *bool + + // A list of grant tokens. + // + // Use a grant token when your permission to call this operation comes from a new + // grant that has not yet achieved eventual consistency. For more information, see [Grant token] + // and [Using a grant token]in the Key Management Service Developer Guide. + // + // [Grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token + // [Using a grant token]: https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token + GrantTokens []string + + noSmithyDocumentSerde +} + +type VerifyMacOutput struct { + + // The HMAC KMS key used in the verification. + KeyId *string + + // The MAC algorithm used in the verification. + MacAlgorithm types.MacAlgorithmSpec + + // A Boolean value that indicates whether the HMAC was verified. A value of True + // indicates that the HMAC ( Mac ) was generated with the specified Message , HMAC + // KMS key ( KeyID ) and MacAlgorithm. . + // + // If the HMAC is not verified, the VerifyMac operation fails with a + // KMSInvalidMacException exception. This exception indicates that one or more of + // the inputs changed since the HMAC was computed. + MacValid bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerifyMac{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerifyMac{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "VerifyMac"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpVerifyMacValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerifyMac(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opVerifyMac(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "VerifyMac", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go new file mode 100644 index 000000000..f9d4ef035 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/auth.go @@ -0,0 +1,313 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
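A minimal sketch of VerifyMac, assuming a configured *kms.Client and an HMAC produced earlier by GenerateMac; the key ID and the HMAC_SHA_256 algorithm are placeholders and must match the GenerateMac call:

```
package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// verifyHMAC checks a MAC over message using the same HMAC KMS key and
// algorithm that generated it.
func verifyHMAC(ctx context.Context, client *kms.Client, message, mac []byte) (bool, error) {
	out, err := client.VerifyMac(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Message:      message,
		Mac:          mac,
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
	})
	if err != nil {
		// A mismatched HMAC is reported as a KMSInvalidMacException error.
		return false, err
	}
	return out.MacValid, nil
}
```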
+ +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. 
+type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "kms") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in 
middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go new file mode 100644 index 000000000..acd0c603d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go @@ -0,0 +1,13880 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
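auth.go resolves an auth scheme per operation: the default resolver returns a single SigV4 option with signing name "kms" and the client's Region, after which the Finalize-step middlewares resolve an identity and sign the request with it. If a caller ever needs to override that resolution, Options.AuthSchemeResolver is the exported hook. A sketch that mirrors the default serviceAuthOptions above but pins the signing region; the type name and the region value are assumptions:

```
package kmsexamples

import (
	"context"

	smithy "github.com/aws/smithy-go"
	smithyauth "github.com/aws/smithy-go/auth"
	smithyhttp "github.com/aws/smithy-go/transport/http"

	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// staticRegionResolver is a hypothetical resolver that always signs with a
// fixed region instead of the client's configured Region.
type staticRegionResolver struct {
	signingRegion string
}

func (r staticRegionResolver) ResolveAuthSchemes(ctx context.Context, params *kms.AuthResolverParameters) ([]*smithyauth.Option, error) {
	var props smithy.Properties
	smithyhttp.SetSigV4SigningName(&props, "kms")
	smithyhttp.SetSigV4SigningRegion(&props, r.signingRegion)
	return []*smithyauth.Option{
		{SchemeID: smithyauth.SchemeIDSigV4, SignerProperties: props},
	}, nil
}

// Wiring it up (the region value is only an example):
//
//	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
//		o.AuthSchemeResolver = staticRegionResolver{signingRegion: "us-west-2"}
//	})
```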
+ +package kms + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" + "time" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsAwsjson11_deserializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_deserializeOpCancelKeyDeletion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCancelKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response, &metadata) + } + output := &CancelKeyDeletionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpConnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpConnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response, &metadata) + } + output := &ConnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + 
errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateAlias struct { +} + +func (*awsAwsjson11_deserializeOpCreateAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateAlias(response, &metadata) + } + output := &CreateAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidAliasNameException", errorCode): + return awsAwsjson11_deserializeErrorInvalidAliasNameException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpCreateCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response, &metadata) + } + output := &CreateCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CloudHsmClusterInUseException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("IncorrectTrustAnchorException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("XksProxyIncorrectAuthenticationCredentialException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidResponseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response, errorBody) + + case strings.EqualFold("XksProxyUriEndpointInUseException", errorCode): + return 
awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriUnreachableException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateGrant struct { +} + +func (*awsAwsjson11_deserializeOpCreateGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateGrant(response, &metadata) + } + output := &CreateGrantOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateGrantOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + 
var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateKey struct { +} + +func (*awsAwsjson11_deserializeOpCreateKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateKey(response, &metadata) + } + output := &CreateKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + case strings.EqualFold("XksKeyAlreadyInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksKeyAlreadyInUseException(response, errorBody) + + case strings.EqualFold("XksKeyInvalidConfigurationException", errorCode): + return 
awsAwsjson11_deserializeErrorXksKeyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksKeyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksKeyNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDecrypt struct { +} + +func (*awsAwsjson11_deserializeOpDecrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDecrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDecrypt(response, &metadata) + } + output := &DecryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDecryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return 
awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("IncorrectKeyException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_deserializeOpDeleteAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteAlias(response, &metadata) + } + output := &DeleteAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response, &metadata) + } + output := &DeleteCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CustomKeyStoreHasCMKsException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response, &metadata) + } + output := &DeleteImportedKeyMaterialOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeriveSharedSecret struct { +} + +func (*awsAwsjson11_deserializeOpDeriveSharedSecret) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeriveSharedSecret) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeriveSharedSecret(response, &metadata) + } + output := &DeriveSharedSecretOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentDeriveSharedSecretOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeriveSharedSecret(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCustomKeyStores) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCustomKeyStores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
_, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response, &metadata) + } + output := &DescribeCustomKeyStoresOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeKey struct { +} + +func (*awsAwsjson11_deserializeOpDescribeKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeKey(response, &metadata) + } + output := &DescribeKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + 
+type awsAwsjson11_deserializeOpDisableKey struct { +} + +func (*awsAwsjson11_deserializeOpDisableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKey(response, &metadata) + } + output := &DisableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpDisableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsjson11_deserializeOpDisableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKeyRotation(response, &metadata) + } + output := &DisableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpDisconnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDisconnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisconnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response, &metadata) + } + output := &DisconnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case 
strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKey struct { +} + +func (*awsAwsjson11_deserializeOpEnableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKey(response, &metadata) + } + output := &EnableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + 
return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpEnableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKeyRotation(response, &metadata) + } + output := &EnableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return 
awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEncrypt struct { +} + +func (*awsAwsjson11_deserializeOpEncrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEncrypt(response, &metadata) + } + output := &EncryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentEncryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return 
awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKey struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKey(response, &metadata) + } + output := &GenerateDataKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); 
err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPair struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPair) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response, &metadata) + } + output := &GenerateDataKeyPairOutput{} + 
out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, 
errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyPairWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return 
awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateMac struct { +} + +func (*awsAwsjson11_deserializeOpGenerateMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateMac(response, &metadata) + } + output := &GenerateMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: 
errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateRandom struct { +} + +func (*awsAwsjson11_deserializeOpGenerateRandom) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateRandom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateRandom(response, &metadata) + } + output := &GenerateRandomOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case 
strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyPolicy(response, &metadata) + } + output := &GetKeyPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := 
resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyRotationStatus struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyRotationStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyRotationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response, &metadata) + } + output := &GetKeyRotationStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff 
[1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetParametersForImport struct { +} + +func (*awsAwsjson11_deserializeOpGetParametersForImport) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetParametersForImport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetParametersForImport(response, &metadata) + } + output := &GetParametersForImportOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetPublicKey struct { +} + +func (*awsAwsjson11_deserializeOpGetPublicKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetPublicKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetPublicKey(response, &metadata) + } + output := &GetPublicKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return 
genericError + + } +} + +type awsAwsjson11_deserializeOpImportKeyMaterial struct { +} + +func (*awsAwsjson11_deserializeOpImportKeyMaterial) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpImportKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorImportKeyMaterial(response, &metadata) + } + output := &ImportKeyMaterialOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("ExpiredImportTokenException", errorCode): + return awsAwsjson11_deserializeErrorExpiredImportTokenException(response, errorBody) + + case 
strings.EqualFold("IncorrectKeyMaterialException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidImportTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidImportTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListAliases struct { +} + +func (*awsAwsjson11_deserializeOpListAliases) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListAliases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListAliases(response, &metadata) + } + output := &ListAliasesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListAliasesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy 
error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListGrants struct { +} + +func (*awsAwsjson11_deserializeOpListGrants) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListGrants(response, &metadata) + } + output := &ListGrantsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListGrantsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListKeyPolicies struct { +} + +func (*awsAwsjson11_deserializeOpListKeyPolicies) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListKeyPolicies) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListKeyPolicies(response, &metadata) + } + output := &ListKeyPoliciesOutput{} 
+ out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListKeyRotations struct { +} + +func (*awsAwsjson11_deserializeOpListKeyRotations) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListKeyRotations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListKeyRotations(response, &metadata) + } + output := &ListKeyRotationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListKeyRotationsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListKeyRotations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + 
+type awsAwsjson11_deserializeOpListKeys struct { +} + +func (*awsAwsjson11_deserializeOpListKeys) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListKeys) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListKeys(response, &metadata) + } + output := &ListKeysOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListKeysOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + 
default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListResourceTags struct { +} + +func (*awsAwsjson11_deserializeOpListResourceTags) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListResourceTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListResourceTags(response, &metadata) + } + output := &ListResourceTagsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListRetirableGrants struct { +} + +func (*awsAwsjson11_deserializeOpListRetirableGrants) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListRetirableGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListRetirableGrants(response, &metadata) + } + output := &ListRetirableGrantsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = 
restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutKeyPolicy struct { +} + +func (*awsAwsjson11_deserializeOpPutKeyPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutKeyPolicy(response, &metadata) + } + output := &PutKeyPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return 
awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpReEncrypt struct { +} + +func (*awsAwsjson11_deserializeOpReEncrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpReEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorReEncrypt(response, &metadata) + } + output := &ReEncryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentReEncryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("IncorrectKeyException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpReplicateKey struct { +} + +func (*awsAwsjson11_deserializeOpReplicateKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpReplicateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + 
return out, metadata, awsAwsjson11_deserializeOpErrorReplicateKey(response, &metadata) + } + output := &ReplicateKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("AlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", 
errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRetireGrant struct { +} + +func (*awsAwsjson11_deserializeOpRetireGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRetireGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRetireGrant(response, &metadata) + } + output := &RetireGrantOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return 
awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRevokeGrant struct { +} + +func (*awsAwsjson11_deserializeOpRevokeGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRevokeGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRevokeGrant(response, &metadata) + } + output := &RevokeGrantOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRevokeGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRotateKeyOnDemand struct { +} + +func (*awsAwsjson11_deserializeOpRotateKeyOnDemand) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRotateKeyOnDemand) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRotateKeyOnDemand(response, &metadata) + } + output := &RotateKeyOnDemandOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentRotateKeyOnDemandOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRotateKeyOnDemand(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson11_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpScheduleKeyDeletion struct { +} + +func (*awsAwsjson11_deserializeOpScheduleKeyDeletion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response, &metadata) + } + output := &ScheduleKeyDeletionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpSign struct { +} + +func (*awsAwsjson11_deserializeOpSign) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSign) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSign(response, &metadata) + } + output := &SignOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSignOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, 
err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + 
if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateAlias struct { +} + +func (*awsAwsjson11_deserializeOpUpdateAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer 
span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateAlias(response, &metadata) + } + output := &UpdateAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpUpdateCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response, &metadata) + } + output := &UpdateCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotRelatedException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case 
strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("XksProxyIncorrectAuthenticationCredentialException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyInvalidResponseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response, errorBody) + + case strings.EqualFold("XksProxyUriEndpointInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriInUseException(response, errorBody) + + case strings.EqualFold("XksProxyUriUnreachableException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInUseException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("XksProxyVpcEndpointServiceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_deserializeOpUpdateKeyDescription) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateKeyDescription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response, &metadata) + } + output := &UpdateKeyDescriptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response 
body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_deserializeOpUpdatePrimaryRegion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdatePrimaryRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response, &metadata) + } + output := &UpdatePrimaryRegionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpVerify struct { +} + +func (*awsAwsjson11_deserializeOpVerify) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerify) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerify(response, &metadata) + } + output := &VerifyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidSignatureException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpVerifyMac struct { +} + +func (*awsAwsjson11_deserializeOpVerifyMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerifyMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer 
span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerifyMac(response, &metadata) + } + output := &VerifyMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("DryRunOperationException", errorCode): + return awsAwsjson11_deserializeErrorDryRunOperationException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidMacException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidMacException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return 
awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.AlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInUseException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response 
*smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotActiveException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotFoundException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotRelatedException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ConflictException{} + err := awsAwsjson11_deserializeDocumentConflictException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreHasCMKsException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreInvalidStateException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNameInUseException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNotFoundException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDependencyTimeoutException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DependencyTimeoutException{} + err := awsAwsjson11_deserializeDocumentDependencyTimeoutException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DisabledException{} + err := awsAwsjson11_deserializeDocumentDisabledException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDryRunOperationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + 
var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DryRunOperationException{} + err := awsAwsjson11_deserializeDocumentDryRunOperationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorExpiredImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ExpiredImportTokenException{} + err := awsAwsjson11_deserializeDocumentExpiredImportTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectKeyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectKeyException{} + err := awsAwsjson11_deserializeDocumentIncorrectKeyException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectKeyMaterialException{} + err := awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.IncorrectTrustAnchorException{} + err := awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidAliasNameException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidAliasNameException{} + err := awsAwsjson11_deserializeDocumentInvalidAliasNameException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidArnException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidArnException{} + err := awsAwsjson11_deserializeDocumentInvalidArnException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidCiphertextException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + 
if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidCiphertextException{} + err := awsAwsjson11_deserializeDocumentInvalidCiphertextException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidGrantIdException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidGrantIdException{} + err := awsAwsjson11_deserializeDocumentInvalidGrantIdException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidGrantTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidGrantTokenException{} + err := awsAwsjson11_deserializeDocumentInvalidGrantTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidImportTokenException{} + err := awsAwsjson11_deserializeDocumentInvalidImportTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidKeyUsageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidKeyUsageException{} + err := awsAwsjson11_deserializeDocumentInvalidKeyUsageException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidMarkerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidMarkerException{} + err := awsAwsjson11_deserializeDocumentInvalidMarkerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKeyUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KeyUnavailableException{} + err := awsAwsjson11_deserializeDocumentKeyUnavailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInternalException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInternalException{} + err := awsAwsjson11_deserializeDocumentKMSInternalException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidMacException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidMacException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidMacException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidSignatureException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidStateException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidStateException(&output, shape) + + if err != nil { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LimitExceededException{} + err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.MalformedPolicyDocumentException{} + err := awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.NotFoundException{} + err := awsAwsjson11_deserializeDocumentNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTagException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TagException{} + err := awsAwsjson11_deserializeDocumentTagException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUnsupportedOperationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnsupportedOperationException{} + err := awsAwsjson11_deserializeDocumentUnsupportedOperationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyAlreadyInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyAlreadyInUseException{} + err := awsAwsjson11_deserializeDocumentXksKeyAlreadyInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyInvalidConfigurationException{} + err := 
awsAwsjson11_deserializeDocumentXksKeyInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksKeyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksKeyNotFoundException{} + err := awsAwsjson11_deserializeDocumentXksKeyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyIncorrectAuthenticationCredentialException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyIncorrectAuthenticationCredentialException{} + err := awsAwsjson11_deserializeDocumentXksProxyIncorrectAuthenticationCredentialException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentXksProxyInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func 
awsAwsjson11_deserializeErrorXksProxyInvalidResponseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyInvalidResponseException{} + err := awsAwsjson11_deserializeDocumentXksProxyInvalidResponseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriEndpointInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriEndpointInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriEndpointInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyUriUnreachableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyUriUnreachableException{} + err := awsAwsjson11_deserializeDocumentXksProxyUriUnreachableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceInUseException{} + err := awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInvalidConfigurationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorXksProxyVpcEndpointServiceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.XksProxyVpcEndpointServiceNotFoundException{} + err := 
awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAliasList(v *[]types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AliasListEntry + if *v == nil { + cv = []types.AliasListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AliasListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAliasListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAliasListEntry(v **types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AliasListEntry + if *v == nil { + sv = &types.AliasListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AliasArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.AliasArn = ptr.String(jtv) + } + + case "AliasName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AliasNameType to be of type string, got %T instead", value) + } + sv.AliasName = ptr.String(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "LastUpdatedDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdatedDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "TargetKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.TargetKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAlreadyExistsException(v **types.AlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AlreadyExistsException + if *v == nil { + sv = &types.AlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
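For readers skimming this vendored hunk: every `awsAwsjson11_deserializeError*` function above follows the same generated pattern — tee the error body into a bounded ring buffer while decoding, so that a failed decode can be wrapped in a `smithy.DeserializationError` carrying a snapshot of the bytes that were read. The following standalone sketch (standard library only; `decodeWithSnapshot` is an illustrative name, not AWS SDK API) shows the core idea:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decodeWithSnapshot mirrors the shape of the generated error deserializers:
// decode the body into an untyped value, and on failure report the error
// together with the bytes that were consumed so far.
func decodeWithSnapshot(body io.Reader) (interface{}, error) {
	var snapshot bytes.Buffer
	// Tee everything the decoder reads into the snapshot buffer.
	decoder := json.NewDecoder(io.TeeReader(body, &snapshot))
	decoder.UseNumber() // keep numbers as json.Number, as the generated code does

	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		return nil, fmt.Errorf("failed to decode response body, %w (snapshot: %q)", err, snapshot.String())
	}
	return shape, nil
}

func main() {
	v, err := decodeWithSnapshot(strings.NewReader(`{"message":"key not found"}`))
	fmt.Println(v, err)
}
```

The real code bounds the snapshot with `smithyio.NewRingBuffer` (1024 bytes) instead of an unbounded `bytes.Buffer`, which keeps error payload capture from growing with the response size.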
ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(v **types.CloudHsmClusterInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterInUseException + if *v == nil { + sv = &types.CloudHsmClusterInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(v **types.CloudHsmClusterInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterInvalidConfigurationException + if *v == nil { + sv = &types.CloudHsmClusterInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(v **types.CloudHsmClusterNotActiveException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotActiveException + if *v == nil { + sv = &types.CloudHsmClusterNotActiveException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(v **types.CloudHsmClusterNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotFoundException + if *v == nil { + sv = &types.CloudHsmClusterNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v 
= sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(v **types.CloudHsmClusterNotRelatedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudHsmClusterNotRelatedException + if *v == nil { + sv = &types.CloudHsmClusterNotRelatedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConflictException + if *v == nil { + sv = &types.ConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(v **types.CustomKeyStoreHasCMKsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreHasCMKsException + if *v == nil { + sv = &types.CustomKeyStoreHasCMKsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(v **types.CustomKeyStoreInvalidStateException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreInvalidStateException + if *v == nil { + sv = &types.CustomKeyStoreInvalidStateException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(v **types.CustomKeyStoreNameInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + 
if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreNameInUseException + if *v == nil { + sv = &types.CustomKeyStoreNameInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(v **types.CustomKeyStoreNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoreNotFoundException + if *v == nil { + sv = &types.CustomKeyStoreNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoresList(v *[]types.CustomKeyStoresListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CustomKeyStoresListEntry + if *v == nil { + cv = []types.CustomKeyStoresListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CustomKeyStoresListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(v **types.CustomKeyStoresListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomKeyStoresListEntry + if *v == nil { + sv = &types.CustomKeyStoresListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CloudHsmClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value) + } + sv.CloudHsmClusterId = ptr.String(jtv) + } + + case "ConnectionErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionErrorCodeType to be of type string, got %T instead", value) + } + sv.ConnectionErrorCode = types.ConnectionErrorCodeType(jtv) + } + + case "ConnectionState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionStateType to be of type string, got %T instead", value) + } + sv.ConnectionState = types.ConnectionStateType(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := 
value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + case "CustomKeyStoreName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreNameType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreName = ptr.String(jtv) + } + + case "CustomKeyStoreType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreType = types.CustomKeyStoreType(jtv) + } + + case "TrustAnchorCertificate": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TrustAnchorCertificateType to be of type string, got %T instead", value) + } + sv.TrustAnchorCertificate = ptr.String(jtv) + } + + case "XksProxyConfiguration": + if err := awsAwsjson11_deserializeDocumentXksProxyConfigurationType(&sv.XksProxyConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDependencyTimeoutException(v **types.DependencyTimeoutException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DependencyTimeoutException + if *v == nil { + sv = &types.DependencyTimeoutException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDisabledException(v **types.DisabledException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DisabledException + if *v == nil { + sv = &types.DisabledException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDryRunOperationException(v **types.DryRunOperationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DryRunOperationException + if *v == nil { + sv = &types.DryRunOperationException{} + } else { + sv = *v + 
} + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(v *[]types.EncryptionAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EncryptionAlgorithmSpec + if *v == nil { + cv = []types.EncryptionAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EncryptionAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.EncryptionAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEncryptionContextType(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionContextValue to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentExpiredImportTokenException(v **types.ExpiredImportTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredImportTokenException + if *v == nil { + sv = &types.ExpiredImportTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantConstraints(v **types.GrantConstraints, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GrantConstraints + if *v == nil { + sv = &types.GrantConstraints{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EncryptionContextEquals": + if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextEquals, value); err != nil { + return err + } + + case "EncryptionContextSubset": + if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextSubset, value); err != nil { + return err + } + + default: + _, _ = 
key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantList(v *[]types.GrantListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GrantListEntry + if *v == nil { + cv = []types.GrantListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GrantListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentGrantListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantListEntry(v **types.GrantListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GrantListEntry + if *v == nil { + sv = &types.GrantListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Constraints": + if err := awsAwsjson11_deserializeDocumentGrantConstraints(&sv.Constraints, value); err != nil { + return err + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "GranteePrincipal": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.GranteePrincipal = ptr.String(jtv) + } + + case "GrantId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value) + } + sv.GrantId = ptr.String(jtv) + } + + case "IssuingAccount": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.IssuingAccount = ptr.String(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantNameType to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Operations": + if err := awsAwsjson11_deserializeDocumentGrantOperationList(&sv.Operations, value); err != nil { + return err + } + + case "RetiringPrincipal": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.RetiringPrincipal = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantOperationList(v *[]types.GrantOperation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv 
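Timestamp fields such as `CreationDate`, `LastUpdatedDate`, and `ValidTo` are decoded from epoch seconds (possibly fractional) via `json.Number` and `smithytime.ParseEpochSeconds`. A minimal sketch of that conversion, assuming only the standard library (`parseEpochSeconds` here is an illustrative stand-in for the smithy helper):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// parseEpochSeconds converts a JSON number of epoch seconds (with optional
// fractional part) into a time.Time, roughly what smithytime.ParseEpochSeconds does.
func parseEpochSeconds(n json.Number) (time.Time, error) {
	f, err := n.Float64()
	if err != nil {
		return time.Time{}, err
	}
	sec := int64(f)
	nsec := int64((f - float64(sec)) * 1e9)
	return time.Unix(sec, nsec).UTC(), nil
}

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"CreationDate": 1726862262.5}`))
	dec.UseNumber() // numbers decode as json.Number instead of float64
	var shape map[string]interface{}
	if err := dec.Decode(&shape); err != nil {
		panic(err)
	}
	if n, ok := shape["CreationDate"].(json.Number); ok {
		t, err := parseEpochSeconds(n)
		fmt.Println(t, err)
	}
}
```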
[]types.GrantOperation + if *v == nil { + cv = []types.GrantOperation{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GrantOperation + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantOperation to be of type string, got %T instead", value) + } + col = types.GrantOperation(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyException(v **types.IncorrectKeyException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyException + if *v == nil { + sv = &types.IncorrectKeyException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(v **types.IncorrectKeyMaterialException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyMaterialException + if *v == nil { + sv = &types.IncorrectKeyMaterialException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(v **types.IncorrectTrustAnchorException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectTrustAnchorException + if *v == nil { + sv = &types.IncorrectTrustAnchorException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidAliasNameException(v **types.InvalidAliasNameException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidAliasNameException + if *v == nil { + sv = &types.InvalidAliasNameException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T 
instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidArnException(v **types.InvalidArnException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidArnException + if *v == nil { + sv = &types.InvalidArnException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidCiphertextException(v **types.InvalidCiphertextException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidCiphertextException + if *v == nil { + sv = &types.InvalidCiphertextException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidGrantIdException(v **types.InvalidGrantIdException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantIdException + if *v == nil { + sv = &types.InvalidGrantIdException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidGrantTokenException(v **types.InvalidGrantTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantTokenException + if *v == nil { + sv = &types.InvalidGrantTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidImportTokenException(v **types.InvalidImportTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) 
+ } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidImportTokenException + if *v == nil { + sv = &types.InvalidImportTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidKeyUsageException(v **types.InvalidKeyUsageException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidKeyUsageException + if *v == nil { + sv = &types.InvalidKeyUsageException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidMarkerException(v **types.InvalidMarkerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidMarkerException + if *v == nil { + sv = &types.InvalidMarkerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyAgreementAlgorithmSpecList(v *[]types.KeyAgreementAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyAgreementAlgorithmSpec + if *v == nil { + cv = []types.KeyAgreementAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyAgreementAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyAgreementAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.KeyAgreementAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyList(v *[]types.KeyListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeyListEntry + if *v == nil { + cv = []types.KeyListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeyListEntry + destAddr := 
&col + if err := awsAwsjson11_deserializeDocumentKeyListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyListEntry(v **types.KeyListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyListEntry + if *v == nil { + sv = &types.KeyListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.KeyArn = ptr.String(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyMetadata(v **types.KeyMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyMetadata + if *v == nil { + sv = &types.KeyMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "AWSAccountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AWSAccountIdType to be of type string, got %T instead", value) + } + sv.AWSAccountId = ptr.String(jtv) + } + + case "CloudHsmClusterId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value) + } + sv.CloudHsmClusterId = ptr.String(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "CustomerMasterKeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomerMasterKeySpec to be of type string, got %T instead", value) + } + sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv) + } + + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + case "DeletionDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionType to be of 
type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "EncryptionAlgorithms": + if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil { + return err + } + + case "ExpirationModel": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExpirationModelType to be of type string, got %T instead", value) + } + sv.ExpirationModel = types.ExpirationModelType(jtv) + } + + case "KeyAgreementAlgorithms": + if err := awsAwsjson11_deserializeDocumentKeyAgreementAlgorithmSpecList(&sv.KeyAgreementAlgorithms, value); err != nil { + return err + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyManager": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyManagerType to be of type string, got %T instead", value) + } + sv.KeyManager = types.KeyManagerType(jtv) + } + + case "KeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value) + } + sv.KeySpec = types.KeySpec(jtv) + } + + case "KeyState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyState to be of type string, got %T instead", value) + } + sv.KeyState = types.KeyState(jtv) + } + + case "KeyUsage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value) + } + sv.KeyUsage = types.KeyUsageType(jtv) + } + + case "MacAlgorithms": + if err := awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(&sv.MacAlgorithms, value); err != nil { + return err + } + + case "MultiRegion": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected NullableBooleanType to be of type *bool, got %T instead", value) + } + sv.MultiRegion = ptr.Bool(jtv) + } + + case "MultiRegionConfiguration": + if err := awsAwsjson11_deserializeDocumentMultiRegionConfiguration(&sv.MultiRegionConfiguration, value); err != nil { + return err + } + + case "Origin": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected OriginType to be of type string, got %T instead", value) + } + sv.Origin = types.OriginType(jtv) + } + + case "PendingDeletionWindowInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PendingDeletionWindowInDays = ptr.Int32(int32(i64)) + } + + case "SigningAlgorithms": + if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil { + return err + } + + case "ValidTo": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "XksKeyConfiguration": + if err := 
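The document deserializers (for `AliasListEntry`, `CustomKeyStoresListEntry`, `KeyMetadata`, and the exception shapes) all map an untyped `map[string]interface{}` onto a typed struct field by field, taking a `**T` so the caller's pointer can be populated in place, and ignoring unknown keys so new API fields do not break older clients. A condensed sketch of that structure under an assumed toy type (`aliasEntry` and `deserializeAliasEntry` are illustrative, not the SDK's types):

```go
package main

import "fmt"

type aliasEntry struct {
	AliasArn  *string
	AliasName *string
}

// deserializeAliasEntry copies known string fields from a decoded JSON object
// into *v, allocating the struct if needed, and skips anything it does not
// recognize — the same tolerant shape the generated deserializers use.
func deserializeAliasEntry(v **aliasEntry, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil destination")
	}
	if value == nil {
		return nil
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %T", value)
	}
	sv := *v
	if sv == nil {
		sv = &aliasEntry{}
	}
	for key, val := range shape {
		s, ok := val.(string)
		if !ok {
			continue // non-string or unknown-typed field: ignore
		}
		switch key {
		case "AliasArn":
			sv.AliasArn = &s
		case "AliasName":
			sv.AliasName = &s
		// unknown keys fall through and are ignored
		}
	}
	*v = sv
	return nil
}

func main() {
	var entry *aliasEntry
	input := map[string]interface{}{"AliasName": "alias/app-signing", "NewField": "ignored"}
	if err := deserializeAliasEntry(&entry, input); err != nil {
		panic(err)
	}
	fmt.Println(*entry.AliasName)
}
```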
awsAwsjson11_deserializeDocumentXksKeyConfigurationType(&sv.XksKeyConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyUnavailableException(v **types.KeyUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyUnavailableException + if *v == nil { + sv = &types.KeyUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInternalException(v **types.KMSInternalException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInternalException + if *v == nil { + sv = &types.KMSInternalException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidMacException(v **types.KMSInvalidMacException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidMacException + if *v == nil { + sv = &types.KMSInvalidMacException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(v **types.KMSInvalidSignatureException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidSignatureException + if *v == nil { + sv = &types.KMSInvalidSignatureException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidStateException(v **types.KMSInvalidStateException, value interface{}) error { + if v == nil 
{ + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidStateException + if *v == nil { + sv = &types.KMSInvalidStateException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(v *[]types.MacAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MacAlgorithmSpec + if *v == nil { + cv = []types.MacAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MacAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.MacAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionConfiguration(v **types.MultiRegionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultiRegionConfiguration + if *v == nil { + sv = &types.MultiRegionConfiguration{} + } else { + sv = *v + } + 
+ for key, value := range shape { + switch key { + case "MultiRegionKeyType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MultiRegionKeyType to be of type string, got %T instead", value) + } + sv.MultiRegionKeyType = types.MultiRegionKeyType(jtv) + } + + case "PrimaryKey": + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&sv.PrimaryKey, value); err != nil { + return err + } + + case "ReplicaKeys": + if err := awsAwsjson11_deserializeDocumentMultiRegionKeyList(&sv.ReplicaKeys, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKey(v **types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultiRegionKey + if *v == nil { + sv = &types.MultiRegionKey{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionType to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKeyList(v *[]types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MultiRegionKey + if *v == nil { + cv = []types.MultiRegionKey{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MultiRegionKey + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotFoundException(v **types.NotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotFoundException + if *v == nil { + sv = &types.NotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPolicyNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok 
:= value.(string) + if !ok { + return fmt.Errorf("expected PolicyNameType to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRotationsList(v *[]types.RotationsListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RotationsListEntry + if *v == nil { + cv = []types.RotationsListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RotationsListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentRotationsListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRotationsListEntry(v **types.RotationsListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RotationsListEntry + if *v == nil { + sv = &types.RotationsListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "RotationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RotationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "RotationType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RotationType to be of type string, got %T instead", value) + } + sv.RotationType = types.RotationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(v *[]types.SigningAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SigningAlgorithmSpec + if *v == nil { + cv = []types.SigningAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SigningAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.SigningAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TagKey": + if value != nil { + jtv, ok := value.(string) + if !ok 
{ + return fmt.Errorf("expected TagKeyType to be of type string, got %T instead", value) + } + sv.TagKey = ptr.String(jtv) + } + + case "TagValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValueType to be of type string, got %T instead", value) + } + sv.TagValue = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagException(v **types.TagException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TagException + if *v == nil { + sv = &types.TagException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUnsupportedOperationException(v **types.UnsupportedOperationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedOperationException + if *v == nil { + sv = &types.UnsupportedOperationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyAlreadyInUseException(v **types.XksKeyAlreadyInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyAlreadyInUseException + if *v == nil { + sv = &types.XksKeyAlreadyInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyConfigurationType(v **types.XksKeyConfigurationType, 
value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyConfigurationType + if *v == nil { + sv = &types.XksKeyConfigurationType{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksKeyIdType to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyInvalidConfigurationException(v **types.XksKeyInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyInvalidConfigurationException + if *v == nil { + sv = &types.XksKeyInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksKeyNotFoundException(v **types.XksKeyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksKeyNotFoundException + if *v == nil { + sv = &types.XksKeyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyConfigurationType(v **types.XksProxyConfigurationType, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyConfigurationType + if *v == nil { + sv = &types.XksProxyConfigurationType{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AccessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyAuthenticationAccessKeyIdType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "Connectivity": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyConnectivityType to be of type string, got %T instead", value) + } + sv.Connectivity = types.XksProxyConnectivityType(jtv) + } + + case "UriEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyUriEndpointType to be of type string, got %T instead", value) 
+ } + sv.UriEndpoint = ptr.String(jtv) + } + + case "UriPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyUriPathType to be of type string, got %T instead", value) + } + sv.UriPath = ptr.String(jtv) + } + + case "VpcEndpointServiceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected XksProxyVpcEndpointServiceNameType to be of type string, got %T instead", value) + } + sv.VpcEndpointServiceName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyIncorrectAuthenticationCredentialException(v **types.XksProxyIncorrectAuthenticationCredentialException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyIncorrectAuthenticationCredentialException + if *v == nil { + sv = &types.XksProxyIncorrectAuthenticationCredentialException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyInvalidConfigurationException(v **types.XksProxyInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyInvalidConfigurationException + if *v == nil { + sv = &types.XksProxyInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyInvalidResponseException(v **types.XksProxyInvalidResponseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyInvalidResponseException + if *v == nil { + sv = &types.XksProxyInvalidResponseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriEndpointInUseException(v **types.XksProxyUriEndpointInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON 
type %v", value) + } + + var sv *types.XksProxyUriEndpointInUseException + if *v == nil { + sv = &types.XksProxyUriEndpointInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriInUseException(v **types.XksProxyUriInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyUriInUseException + if *v == nil { + sv = &types.XksProxyUriInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyUriUnreachableException(v **types.XksProxyUriUnreachableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyUriUnreachableException + if *v == nil { + sv = &types.XksProxyUriUnreachableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInUseException(v **types.XksProxyVpcEndpointServiceInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceInUseException + if *v == nil { + sv = &types.XksProxyVpcEndpointServiceInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceInvalidConfigurationException(v **types.XksProxyVpcEndpointServiceInvalidConfigurationException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceInvalidConfigurationException + if *v == nil { + sv = 
&types.XksProxyVpcEndpointServiceInvalidConfigurationException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentXksProxyVpcEndpointServiceNotFoundException(v **types.XksProxyVpcEndpointServiceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.XksProxyVpcEndpointServiceNotFoundException + if *v == nil { + sv = &types.XksProxyVpcEndpointServiceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(v **CancelKeyDeletionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CancelKeyDeletionOutput + if *v == nil { + sv = &CancelKeyDeletionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(v **ConnectCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ConnectCustomKeyStoreOutput + if *v == nil { + sv = &ConnectCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(v **CreateCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateCustomKeyStoreOutput + if *v == nil { + sv = &CreateCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomKeyStoreId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value) + } + sv.CustomKeyStoreId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentCreateGrantOutput(v **CreateGrantOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateGrantOutput + if *v == nil { + sv = &CreateGrantOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GrantId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value) + } + sv.GrantId = ptr.String(jtv) + } + + case "GrantToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantTokenType to be of type string, got %T instead", value) + } + sv.GrantToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateKeyOutput(v **CreateKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateKeyOutput + if *v == nil { + sv = &CreateKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDecryptOutput(v **DecryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DecryptOutput + if *v == nil { + sv = &DecryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + + case "EncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(v **DeleteCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == 
nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteCustomKeyStoreOutput + if *v == nil { + sv = &DeleteCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeriveSharedSecretOutput(v **DeriveSharedSecretOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeriveSharedSecretOutput + if *v == nil { + sv = &DeriveSharedSecretOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + + case "KeyAgreementAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyAgreementAlgorithmSpec to be of type string, got %T instead", value) + } + sv.KeyAgreementAlgorithm = types.KeyAgreementAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyOrigin": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected OriginType to be of type string, got %T instead", value) + } + sv.KeyOrigin = types.OriginType(jtv) + } + + case "SharedSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.SharedSecret = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(v **DescribeCustomKeyStoresOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeCustomKeyStoresOutput + if *v == nil { + sv = &DescribeCustomKeyStoresOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomKeyStores": + if err := awsAwsjson11_deserializeDocumentCustomKeyStoresList(&sv.CustomKeyStores, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(v **DescribeKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeKeyOutput + if *v == nil { + sv = &DescribeKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(v **DisconnectCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DisconnectCustomKeyStoreOutput + if *v == nil { + sv = &DisconnectCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentEncryptOutput(v **EncryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *EncryptOutput + if *v == nil { + sv = &EncryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "EncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(v **GenerateDataKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyOutput + if *v == nil { + sv = &GenerateDataKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "CiphertextForRecipient": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(v **GenerateDataKeyPairOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyPairOutput + if *v == nil { + sv = &GenerateDataKeyPairOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyPairSpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value) + } + sv.KeyPairSpec = types.DataKeyPairSpec(jtv) + } + + case "PrivateKeyCiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.PrivateKeyCiphertextBlob = dv + } + + case "PrivateKeyPlaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.PrivateKeyPlaintext = dv + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(v **GenerateDataKeyPairWithoutPlaintextOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + 
if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyPairWithoutPlaintextOutput + if *v == nil { + sv = &GenerateDataKeyPairWithoutPlaintextOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyPairSpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value) + } + sv.KeyPairSpec = types.DataKeyPairSpec(jtv) + } + + case "PrivateKeyCiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.PrivateKeyCiphertextBlob = dv + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(v **GenerateDataKeyWithoutPlaintextOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateDataKeyWithoutPlaintextOutput + if *v == nil { + sv = &GenerateDataKeyWithoutPlaintextOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateMacOutput(v **GenerateMacOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateMacOutput + if *v == nil { + sv = &GenerateMacOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Mac": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T 
instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.Mac = dv + } + + case "MacAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + sv.MacAlgorithm = types.MacAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(v **GenerateRandomOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GenerateRandomOutput + if *v == nil { + sv = &GenerateRandomOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextForRecipient": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextForRecipient = dv + } + + case "Plaintext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.Plaintext = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(v **GetKeyPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetKeyPolicyOutput + if *v == nil { + sv = &GetKeyPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Policy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value) + } + sv.Policy = ptr.String(jtv) + } + + case "PolicyName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyNameType to be of type string, got %T instead", value) + } + sv.PolicyName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(v **GetKeyRotationStatusOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetKeyRotationStatusOutput + if *v == nil { + sv = &GetKeyRotationStatusOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyRotationEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return 
fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.KeyRotationEnabled = jtv + } + + case "NextRotationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.NextRotationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "OnDemandRotationStartDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.OnDemandRotationStartDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "RotationPeriodInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected RotationPeriodInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RotationPeriodInDays = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(v **GetParametersForImportOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetParametersForImportOutput + if *v == nil { + sv = &GetParametersForImportOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ImportToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.ImportToken = dv + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "ParametersValidTo": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ParametersValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PlaintextType, %w", err) + } + sv.PublicKey = dv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(v **GetPublicKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetPublicKeyOutput + if *v == nil { + sv = &GetPublicKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CustomerMasterKeySpec": + if value != nil { + jtv, 
ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomerMasterKeySpec to be of type string, got %T instead", value) + } + sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv) + } + + case "EncryptionAlgorithms": + if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil { + return err + } + + case "KeyAgreementAlgorithms": + if err := awsAwsjson11_deserializeDocumentKeyAgreementAlgorithmSpecList(&sv.KeyAgreementAlgorithms, value); err != nil { + return err + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeySpec": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value) + } + sv.KeySpec = types.KeySpec(jtv) + } + + case "KeyUsage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value) + } + sv.KeyUsage = types.KeyUsageType(jtv) + } + + case "PublicKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err) + } + sv.PublicKey = dv + } + + case "SigningAlgorithms": + if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(v **ImportKeyMaterialOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ImportKeyMaterialOutput + if *v == nil { + sv = &ImportKeyMaterialOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListAliasesOutput(v **ListAliasesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAliasesOutput + if *v == nil { + sv = &ListAliasesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Aliases": + if err := awsAwsjson11_deserializeDocumentAliasList(&sv.Aliases, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListGrantsOutput(v **ListGrantsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of 
type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListGrantsOutput + if *v == nil { + sv = &ListGrantsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Grants": + if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(v **ListKeyPoliciesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListKeyPoliciesOutput + if *v == nil { + sv = &ListKeyPoliciesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "PolicyNames": + if err := awsAwsjson11_deserializeDocumentPolicyNameList(&sv.PolicyNames, value); err != nil { + return err + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListKeyRotationsOutput(v **ListKeyRotationsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListKeyRotationsOutput + if *v == nil { + sv = &ListKeyRotationsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Rotations": + if err := awsAwsjson11_deserializeDocumentRotationsList(&sv.Rotations, value); err != nil { + return err + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListKeysOutput(v **ListKeysOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListKeysOutput + if *v == nil { + sv = &ListKeysOutput{} + } else { + 
sv = *v + } + + for key, value := range shape { + switch key { + case "Keys": + if err := awsAwsjson11_deserializeDocumentKeyList(&sv.Keys, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(v **ListResourceTagsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListResourceTagsOutput + if *v == nil { + sv = &ListResourceTagsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(v **ListRetirableGrantsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListRetirableGrantsOutput + if *v == nil { + sv = &ListRetirableGrantsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Grants": + if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil { + return err + } + + case "NextMarker": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value) + } + sv.NextMarker = ptr.String(jtv) + } + + case "Truncated": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.Truncated = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentReEncryptOutput(v **ReEncryptOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ReEncryptOutput + if *v == nil { + sv = &ReEncryptOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CiphertextBlob": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := 
base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.CiphertextBlob = dv + } + + case "DestinationEncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.DestinationEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "SourceEncryptionAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SourceEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv) + } + + case "SourceKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.SourceKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(v **ReplicateKeyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ReplicateKeyOutput + if *v == nil { + sv = &ReplicateKeyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReplicaKeyMetadata": + if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.ReplicaKeyMetadata, value); err != nil { + return err + } + + case "ReplicaPolicy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value) + } + sv.ReplicaPolicy = ptr.String(jtv) + } + + case "ReplicaTags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.ReplicaTags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentRotateKeyOnDemandOutput(v **RotateKeyOnDemandOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RotateKeyOnDemandOutput + if *v == nil { + sv = &RotateKeyOnDemandOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(v **ScheduleKeyDeletionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ScheduleKeyDeletionOutput + if *v == nil { + sv = &ScheduleKeyDeletionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case 
"DeletionDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyState to be of type string, got %T instead", value) + } + sv.KeyState = types.KeyState(jtv) + } + + case "PendingWindowInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PendingWindowInDays = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSignOutput(v **SignOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SignOutput + if *v == nil { + sv = &SignOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Signature": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.Signature = dv + } + + case "SigningAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(v **UpdateCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateCustomKeyStoreOutput + if *v == nil { + sv = &UpdateCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyMacOutput(v **VerifyMacOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyMacOutput + if *v == nil { + sv = &VerifyMacOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + 
if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "MacAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + sv.MacAlgorithm = types.MacAlgorithmSpec(jtv) + } + + case "MacValid": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.MacValid = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyOutput(v **VerifyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyOutput + if *v == nil { + sv = &VerifyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "SignatureValid": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.SignatureValid = jtv + } + + case "SigningAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type protocolErrorInfo struct { + Type string `json:"__type"` + Message string + Code any // nonstandard for awsjson but some services do present the type here +} + +func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { + var errInfo protocolErrorInfo + if err := decoder.Decode(&errInfo); err != nil { + if err == io.EOF { + return errInfo, nil + } + return errInfo, err + } + + return errInfo, nil +} + +func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { + if len(headerType) != 0 { + return headerType, true + } else if len(bodyInfo.Type) != 0 { + return bodyInfo.Type, true + } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { + return code, true + } + return "", false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go new file mode 100644 index 000000000..f989361a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go @@ -0,0 +1,97 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package kms provides the API client, operations, and parameter types for AWS +// Key Management Service. +// +// # Key Management Service +// +// Key Management Service (KMS) is an encryption and key management web service. +// This guide describes the KMS operations that you can call programmatically. For +// general information about KMS, see the [Key Management Service Developer Guide]. +// +// KMS has replaced the term customer master key (CMK) with KMS key and KMS key. +// The concept has not changed. To prevent breaking changes, KMS is keeping some +// variations of this term. 
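The generated deserializers in deserializers.go above all reduce to the same few moves: type-assert the decoded interface{} value, base64-decode blob members such as PublicKey, CiphertextBlob, and Signature, and convert epoch-seconds json.Number values into time.Time. A minimal standalone sketch of that pattern follows, using an invented payload and struct purely for illustration (the real generated code additionally goes through smithytime and ptr helpers):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// output mirrors the shape the generated KMS deserializers fill in:
// a string identifier, a base64-encoded blob, and an epoch-seconds date.
// The field and key names here are illustrative, not taken from the SDK.
type output struct {
	KeyId        string
	Signature    []byte
	DeletionDate time.Time
}

func main() {
	// Hypothetical awsjson-style response body.
	body := `{"KeyId":"example-key-id","Signature":"aGVsbG8=","DeletionDate":1726790400}`

	dec := json.NewDecoder(strings.NewReader(body))
	dec.UseNumber() // the generated code decodes numbers as json.Number

	var shape map[string]interface{}
	if err := dec.Decode(&shape); err != nil {
		panic(err)
	}

	var out output
	for key, value := range shape {
		switch key {
		case "KeyId":
			jtv, ok := value.(string)
			if !ok {
				panic(fmt.Sprintf("expected string, got %T", value))
			}
			out.KeyId = jtv
		case "Signature":
			// Blob members arrive as base64 strings and are decoded to []byte.
			jtv, ok := value.(string)
			if !ok {
				panic(fmt.Sprintf("expected string, got %T", value))
			}
			dv, err := base64.StdEncoding.DecodeString(jtv)
			if err != nil {
				panic(err)
			}
			out.Signature = dv
		case "DeletionDate":
			// Timestamps arrive as epoch seconds; the SDK uses smithytime.ParseEpochSeconds.
			jtv, ok := value.(json.Number)
			if !ok {
				panic(fmt.Sprintf("expected json.Number, got %T", value))
			}
			f64, err := jtv.Float64()
			if err != nil {
				panic(err)
			}
			out.DeletionDate = time.Unix(0, int64(f64*float64(time.Second)))
		}
	}
	fmt.Printf("%+v\n", out)
}
```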
+// +// Amazon Web Services provides SDKs that consist of libraries and sample code for +// various programming languages and platforms (Java, Ruby, .Net, macOS, Android, +// etc.). The SDKs provide a convenient way to create programmatic access to KMS +// and other Amazon Web Services services. For example, the SDKs take care of tasks +// such as signing requests (see below), managing errors, and retrying requests +// automatically. For more information about the Amazon Web Services SDKs, +// including how to download and install them, see [Tools for Amazon Web Services]. +// +// We recommend that you use the Amazon Web Services SDKs to make programmatic API +// calls to KMS. +// +// If you need to use FIPS 140-2 validated cryptographic modules when +// communicating with Amazon Web Services, use the FIPS endpoint in your preferred +// Amazon Web Services Region. For more information about the available FIPS +// endpoints, see [Service endpoints]in the Key Management Service topic of the Amazon Web Services +// General Reference. +// +// All KMS API calls must be signed and be transmitted using Transport Layer +// Security (TLS). KMS recommends you always use the latest supported TLS version. +// Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such +// as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman +// (ECDHE). Most modern systems such as Java 7 and later support these modes. +// +// # Signing Requests +// +// Requests must be signed using an access key ID and a secret access key. We +// strongly recommend that you do not use your Amazon Web Services account root +// access key ID and secret access key for everyday work. You can use the access +// key ID and secret access key for an IAM user or you can use the Security Token +// Service (STS) to generate temporary security credentials and use those to sign +// requests. +// +// All KMS requests must be signed with [Signature Version 4]. +// +// # Logging API Requests +// +// KMS supports CloudTrail, a service that logs Amazon Web Services API calls and +// related events for your Amazon Web Services account and delivers them to an +// Amazon S3 bucket that you specify. By using the information collected by +// CloudTrail, you can determine what requests were made to KMS, who made the +// request, when it was made, and so on. To learn more about CloudTrail, including +// how to turn it on and find your log files, see the [CloudTrail User Guide]. +// +// # Additional Resources +// +// For more information about credentials and request signing, see the following: +// +// [Amazon Web Services Security Credentials] +// - - This topic provides general information about the types of credentials +// used to access Amazon Web Services. +// +// [Temporary Security Credentials] +// - - This section of the IAM User Guide describes how to create and use +// temporary security credentials. +// +// [Signature Version 4 Signing Process] +// - - This set of topics walks you through the process of signing a request +// using an access key ID and a secret access key. +// +// # Commonly Used API Operations +// +// Of the API operations discussed in this guide, the following will prove the +// most useful for most applications. You will likely perform operations other than +// these, such as creating keys and assigning policies, by using the console. 
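For orientation, here is a hedged sketch of calling two of the commonly used operations listed just below (Encrypt and Decrypt) through this aws-sdk-go-v2 client. The region, key identifier, and plaintext are placeholders, and error handling is reduced to panics:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	// Load credentials and region from the usual sources (env, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		panic(err)
	}
	client := kms.NewFromConfig(cfg)

	// "alias/example" is a placeholder key identifier.
	enc, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:     aws.String("alias/example"),
		Plaintext: []byte("hello"),
	})
	if err != nil {
		panic(err)
	}

	// For symmetric keys, Decrypt can recover the key ID from the ciphertext metadata.
	dec, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: enc.CiphertextBlob,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("decrypted %q with key %s\n", dec.Plaintext, aws.ToString(dec.KeyId))
}
```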
+// +// # Encrypt +// +// # Decrypt +// +// # GenerateDataKey +// +// # GenerateDataKeyWithoutPlaintext +// +// [Signature Version 4]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [Amazon Web Services Security Credentials]: https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html +// [Key Management Service Developer Guide]: https://docs.aws.amazon.com/kms/latest/developerguide/ +// [Service endpoints]: https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region +// [CloudTrail User Guide]: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/ +// [Signature Version 4 Signing Process]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html +package kms diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go new file mode 100644 index 000000000..0d9f88d68 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -0,0 +1,537 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
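As a usage note for the endpoint plumbing in this file: a short sketch of pointing the client at a custom endpoint, for example a local KMS stand-in during tests. The URL is a placeholder; BaseEndpoint (which bindEndpointParams later feeds to EndpointResolverV2 as SDK::Endpoint) is the simpler hook, while EndpointResolverFromURL, documented immediately above and implemented next, is the older resolver-based route.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion("us-east-1"))
	if err != nil {
		panic(err)
	}

	// Placeholder URL for a local KMS-compatible endpoint.
	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
		o.BaseEndpoint = aws.String("http://localhost:4566")
	})
	_ = client
}
```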
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "kms" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_KMS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "KMS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. 
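To make the rules-based resolution concrete, here is a small sketch, assuming only the exported types in this file, that resolves the FIPS endpoint for a region directly through EndpointResolverV2. ValidateRequired and WithDefaults, defined next, are what ResolveEndpoint applies before walking the partition rules:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	resolver := kms.NewDefaultEndpointResolverV2()

	// UseFIPS selects the kms-fips.<region>.<dnsSuffix> rule; Endpoint is left
	// nil so the partition rules, not a custom URL, decide the result.
	ep, err := resolver.ResolveEndpoint(context.Background(), kms.EndpointParameters{
		Region:       aws.String("us-east-1"),
		UseFIPS:      aws.Bool(true),
		UseDualStack: aws.Bool(false),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URI.String()) // expected: https://kms-fips.us-east-1.amazonaws.com
}
```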
+func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kms-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + uriString := func() string { + var out strings.Builder + 
out.WriteString("https://kms-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://kms.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://kms.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, 
fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json new file mode 100644 index 000000000..301d9467a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json @@ -0,0 +1,85 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CancelKeyDeletion.go", + "api_op_ConnectCustomKeyStore.go", + "api_op_CreateAlias.go", + "api_op_CreateCustomKeyStore.go", + "api_op_CreateGrant.go", + "api_op_CreateKey.go", + "api_op_Decrypt.go", + "api_op_DeleteAlias.go", + "api_op_DeleteCustomKeyStore.go", + "api_op_DeleteImportedKeyMaterial.go", + "api_op_DeriveSharedSecret.go", + "api_op_DescribeCustomKeyStores.go", + "api_op_DescribeKey.go", + "api_op_DisableKey.go", + "api_op_DisableKeyRotation.go", + "api_op_DisconnectCustomKeyStore.go", + "api_op_EnableKey.go", + "api_op_EnableKeyRotation.go", + "api_op_Encrypt.go", + "api_op_GenerateDataKey.go", + "api_op_GenerateDataKeyPair.go", + "api_op_GenerateDataKeyPairWithoutPlaintext.go", + "api_op_GenerateDataKeyWithoutPlaintext.go", + "api_op_GenerateMac.go", + "api_op_GenerateRandom.go", + "api_op_GetKeyPolicy.go", + "api_op_GetKeyRotationStatus.go", + "api_op_GetParametersForImport.go", + "api_op_GetPublicKey.go", + "api_op_ImportKeyMaterial.go", + "api_op_ListAliases.go", + "api_op_ListGrants.go", + "api_op_ListKeyPolicies.go", + "api_op_ListKeyRotations.go", + "api_op_ListKeys.go", + "api_op_ListResourceTags.go", + "api_op_ListRetirableGrants.go", + "api_op_PutKeyPolicy.go", + "api_op_ReEncrypt.go", + "api_op_ReplicateKey.go", + "api_op_RetireGrant.go", + "api_op_RevokeGrant.go", + "api_op_RotateKeyOnDemand.go", + "api_op_ScheduleKeyDeletion.go", + "api_op_Sign.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateAlias.go", + "api_op_UpdateCustomKeyStore.go", + "api_op_UpdateKeyDescription.go", + "api_op_UpdatePrimaryRegion.go", + "api_op_Verify.go", + "api_op_VerifyMac.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": 
"github.com/aws/aws-sdk-go-v2/service/kms", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go new file mode 100644 index 000000000..7ae7d409d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package kms + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.36.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go new file mode 100644 index 000000000..0c6bbd587 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -0,0 +1,978 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver KMS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "af-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + Deprecated: aws.TrueTernary, + }, + 
endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + 
endpoints.EndpointKey{ + Region: "ap-southeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-5.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + Deprecated: 
aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "il-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-central-1-fips", + }: endpoints.Endpoint{ + Hostname: 
"kms-fips.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "sa-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + 
Hostname: "kms.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + 
Hostname: "kms.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go new file mode 100644 index 000000000..e7f5513b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/options.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The client meter provider. + MeterProvider metrics.MeterProvider + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. 
+ RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The client tracer provider. + TracerProvider tracing.TracerProvider + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go new file mode 100644 index 000000000..1f9dabb9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -0,0 +1,4827 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
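
The options.go file above defines the generated client's `Options` struct (region, credentials, retryer, endpoint resolvers, tracing and metrics providers) plus functional options such as `WithAPIOptions` and the SigV4 signing overrides. A small sketch of how those knobs are typically exercised with aws-sdk-go-v2 follows; the endpoint URL and retry counts are made up for illustration and are not taken from this patch.

```
// Sketch only: using the generated Options and functional options.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Client-wide overrides via a functional option on *kms.Options.
	client := kms.NewFromConfig(cfg, func(o *kms.Options) {
		o.Region = "us-east-1"                                  // Region is required
		o.BaseEndpoint = aws.String("https://kms.example.test") // hypothetical endpoint
		o.RetryMaxAttempts = 5
	})

	// The same functional options can be passed per operation, where they
	// apply to that call only.
	_, err = client.ListKeys(context.TODO(), &kms.ListKeysInput{},
		func(o *kms.Options) { o.RetryMaxAttempts = 1 },
	)
	if err != nil {
		log.Println("ListKeys:", err)
	}
}
```
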
+ +package kms + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_serializeOpCancelKeyDeletion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelKeyDeletionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CancelKeyDeletion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpConnectCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + _ = 
input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ConnectCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateAlias struct { +} + +func (*awsAwsjson11_serializeOpCreateAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpCreateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateGrant struct { +} + +func (*awsAwsjson11_serializeOpCreateGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = 
operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateKey struct { +} + +func (*awsAwsjson11_serializeOpCreateKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDecrypt struct { +} + +func 
(*awsAwsjson11_serializeOpDecrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Decrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDecryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_serializeOpDeleteAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, 
request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpDeleteCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_serializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteImportedKeyMaterial") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeriveSharedSecret struct { +} + +func (*awsAwsjson11_serializeOpDeriveSharedSecret) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeriveSharedSecret) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeriveSharedSecretInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeriveSharedSecret") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeriveSharedSecretInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_serializeOpDescribeCustomKeyStores) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeCustomKeyStoresInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeCustomKeyStores") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeKey struct { +} + +func (*awsAwsjson11_serializeOpDescribeKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisableKey struct { +} + +func (*awsAwsjson11_serializeOpDisableKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisableKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentDisableKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisableKeyRotation struct { +} + +func (*awsAwsjson11_serializeOpDisableKeyRotation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisableKeyRotationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKeyRotation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisconnectCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpDisconnectCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisconnectCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEnableKey struct { +} + +func (*awsAwsjson11_serializeOpEnableKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EnableKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEnableKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEnableKeyRotation struct { +} + +func (*awsAwsjson11_serializeOpEnableKeyRotation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EnableKeyRotationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKeyRotation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEncrypt struct { +} + +func (*awsAwsjson11_serializeOpEncrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EncryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type 
%T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Encrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEncryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKey struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = 
request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyPair struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyPair) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyPairInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPair") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPairWithoutPlaintext") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyWithoutPlaintext") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + 
span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateMac struct { +} + +func (*awsAwsjson11_serializeOpGenerateMac) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateMacInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateMac") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateMacInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateRandom struct { +} + +func (*awsAwsjson11_serializeOpGenerateRandom) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateRandomInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + 
request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateRandom") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateRandomInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetKeyPolicy struct { +} + +func (*awsAwsjson11_serializeOpGetKeyPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetKeyPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetKeyRotationStatus struct { +} + +func (*awsAwsjson11_serializeOpGetKeyRotationStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetKeyRotationStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyRotationStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetParametersForImport struct { +} + +func (*awsAwsjson11_serializeOpGetParametersForImport) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetParametersForImportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetParametersForImport") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetParametersForImportInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetPublicKey struct { +} + +func (*awsAwsjson11_serializeOpGetPublicKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPublicKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetPublicKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetPublicKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpImportKeyMaterial struct { +} + +func (*awsAwsjson11_serializeOpImportKeyMaterial) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ImportKeyMaterialInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ImportKeyMaterial") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListAliases struct { +} + +func (*awsAwsjson11_serializeOpListAliases) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAliasesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListAliases") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentListAliasesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListGrants struct { +} + +func (*awsAwsjson11_serializeOpListGrants) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListGrantsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListGrants") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListGrantsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListKeyPolicies struct { +} + +func (*awsAwsjson11_serializeOpListKeyPolicies) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + 
input, ok := in.Parameters.(*ListKeyPoliciesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeyPolicies") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListKeyRotations struct { +} + +func (*awsAwsjson11_serializeOpListKeyRotations) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListKeyRotations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListKeyRotationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeyRotations") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListKeyRotationsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListKeys struct { +} + +func (*awsAwsjson11_serializeOpListKeys) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListKeysInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeys") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListKeysInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListResourceTags struct { +} + +func (*awsAwsjson11_serializeOpListResourceTags) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListResourceTagsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + 
request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListResourceTags") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListResourceTagsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListRetirableGrants struct { +} + +func (*awsAwsjson11_serializeOpListRetirableGrants) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListRetirableGrantsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListRetirableGrants") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return 
next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutKeyPolicy struct { +} + +func (*awsAwsjson11_serializeOpPutKeyPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutKeyPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.PutKeyPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpReEncrypt struct { +} + +func (*awsAwsjson11_serializeOpReEncrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ReEncryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReEncrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentReEncryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpReplicateKey struct { +} + +func (*awsAwsjson11_serializeOpReplicateKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ReplicateKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReplicateKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentReplicateKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRetireGrant struct { +} + +func (*awsAwsjson11_serializeOpRetireGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RetireGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RetireGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRetireGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRevokeGrant struct { +} + +func (*awsAwsjson11_serializeOpRevokeGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RevokeGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RevokeGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRevokeGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRotateKeyOnDemand struct { +} + +func (*awsAwsjson11_serializeOpRotateKeyOnDemand) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRotateKeyOnDemand) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RotateKeyOnDemandInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RotateKeyOnDemand") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRotateKeyOnDemandInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpScheduleKeyDeletion struct { +} + +func (*awsAwsjson11_serializeOpScheduleKeyDeletion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer 
span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ScheduleKeyDeletionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ScheduleKeyDeletion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpSign struct { +} + +func (*awsAwsjson11_serializeOpSign) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SignInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Sign") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSignInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if 
request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters 
type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateAlias struct { +} + +func (*awsAwsjson11_serializeOpUpdateAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + 
endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpUpdateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_serializeOpUpdateKeyDescription) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if 
request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateKeyDescription") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_serializeOpUpdatePrimaryRegion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdatePrimaryRegion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerify struct { +} + +func (*awsAwsjson11_serializeOpVerify) ID() string { + 
return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Verify") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerifyMac struct { +} + +func (*awsAwsjson11_serializeOpVerifyMac) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyMacInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.VerifyMac") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyMacInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentEncryptionContextType(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantConstraints(v *types.GrantConstraints, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContextEquals != nil { + ok := object.Key("EncryptionContextEquals") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextEquals, ok); err != nil { + return err + } + } + + if v.EncryptionContextSubset != nil { + ok := object.Key("EncryptionContextSubset") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextSubset, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentGrantOperationList(v []types.GrantOperation, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantTokenList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentRecipientInfo(v *types.RecipientInfo, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttestationDocument != nil { + ok := object.Key("AttestationDocument") + ok.Base64EncodeBytes(v.AttestationDocument) + } + + if len(v.KeyEncryptionAlgorithm) > 0 { + ok := object.Key("KeyEncryptionAlgorithm") + ok.String(string(v.KeyEncryptionAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TagKey != nil { + ok := object.Key("TagKey") + ok.String(*v.TagKey) + } + + if v.TagValue != nil { + ok := object.Key("TagValue") + ok.String(*v.TagValue) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v 
*types.XksProxyAuthenticationCredentialType, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AccessKeyId != nil { + ok := object.Key("AccessKeyId") + ok.String(*v.AccessKeyId) + } + + if v.RawSecretAccessKey != nil { + ok := object.Key("RawSecretAccessKey") + ok.String(*v.RawSecretAccessKey) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(v *CancelKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateAliasInput(v *CreateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(v *CreateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if len(v.CustomKeyStoreType) > 0 { + ok := object.Key("CustomKeyStoreType") + ok.String(string(v.CustomKeyStoreType)) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.TrustAnchorCertificate != nil { + ok := object.Key("TrustAnchorCertificate") + ok.String(*v.TrustAnchorCertificate) + } + + if v.XksProxyAuthenticationCredential != nil { + ok := object.Key("XksProxyAuthenticationCredential") + if err := awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential, ok); err != nil { + return err + } + } + + if len(v.XksProxyConnectivity) > 0 { + ok := object.Key("XksProxyConnectivity") + ok.String(string(v.XksProxyConnectivity)) + } + + if v.XksProxyUriEndpoint != nil { + ok := object.Key("XksProxyUriEndpoint") + ok.String(*v.XksProxyUriEndpoint) + } + + if v.XksProxyUriPath != nil { + ok := object.Key("XksProxyUriPath") + ok.String(*v.XksProxyUriPath) + } + + if v.XksProxyVpcEndpointServiceName != nil { + ok := object.Key("XksProxyVpcEndpointServiceName") + ok.String(*v.XksProxyVpcEndpointServiceName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateGrantInput(v *CreateGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Constraints != nil { + ok := object.Key("Constraints") + if err := awsAwsjson11_serializeDocumentGrantConstraints(v.Constraints, ok); err != nil { + return err + } + } + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + 
return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Operations != nil { + ok := object.Key("Operations") + if err := awsAwsjson11_serializeDocumentGrantOperationList(v.Operations, ok); err != nil { + return err + } + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateKeyInput(v *CreateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if len(v.CustomerMasterKeySpec) > 0 { + ok := object.Key("CustomerMasterKeySpec") + ok.String(string(v.CustomerMasterKeySpec)) + } + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if len(v.KeyUsage) > 0 { + ok := object.Key("KeyUsage") + ok.String(string(v.KeyUsage)) + } + + if v.MultiRegion != nil { + ok := object.Key("MultiRegion") + ok.Boolean(*v.MultiRegion) + } + + if len(v.Origin) > 0 { + ok := object.Key("Origin") + ok.String(string(v.Origin)) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.XksKeyId != nil { + ok := object.Key("XksKeyId") + ok.String(*v.XksKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDecryptInput(v *DecryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteAliasInput(v *DeleteAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeriveSharedSecretInput(v *DeriveSharedSecretInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if len(v.KeyAgreementAlgorithm) > 0 { + ok := object.Key("KeyAgreementAlgorithm") + ok.String(string(v.KeyAgreementAlgorithm)) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PublicKey != nil { + ok := object.Key("PublicKey") + ok.Base64EncodeBytes(v.PublicKey) + } + + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(v *DescribeCustomKeyStoresInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeKeyInput(v *DescribeKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisableKeyInput(v *DisableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(v *DisableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyInput(v *EnableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(v *EnableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + 
ok.String(*v.KeyId) + } + + if v.RotationPeriodInDays != nil { + ok := object.Key("RotationPeriodInDays") + ok.Integer(*v.RotationPeriodInDays) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEncryptInput(v *EncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Plaintext != nil { + ok := object.Key("Plaintext") + ok.Base64EncodeBytes(v.Plaintext) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(v *GenerateDataKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(v *GenerateDataKeyPairInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + 
if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateMacInput(v *GenerateMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateRandomInput(v *GenerateRandomInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + if v.Recipient != nil { + ok := object.Key("Recipient") + if err := awsAwsjson11_serializeDocumentRecipientInfo(v.Recipient, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(v *GetKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(v *GetKeyRotationStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetParametersForImportInput(v *GetParametersForImportInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != 
nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.WrappingAlgorithm) > 0 { + ok := object.Key("WrappingAlgorithm") + ok.String(string(v.WrappingAlgorithm)) + } + + if len(v.WrappingKeySpec) > 0 { + ok := object.Key("WrappingKeySpec") + ok.String(string(v.WrappingKeySpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetPublicKeyInput(v *GetPublicKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(v *ImportKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptedKeyMaterial != nil { + ok := object.Key("EncryptedKeyMaterial") + ok.Base64EncodeBytes(v.EncryptedKeyMaterial) + } + + if len(v.ExpirationModel) > 0 { + ok := object.Key("ExpirationModel") + ok.String(string(v.ExpirationModel)) + } + + if v.ImportToken != nil { + ok := object.Key("ImportToken") + ok.Base64EncodeBytes(v.ImportToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.ValidTo != nil { + ok := object.Key("ValidTo") + ok.Double(smithytime.FormatEpochSeconds(*v.ValidTo)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAliasesInput(v *ListAliasesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListGrantsInput(v *ListGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(v *ListKeyPoliciesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeyRotationsInput(v *ListKeyRotationsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeysInput(v *ListKeysInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := 
object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListResourceTagsInput(v *ListResourceTagsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(v *ListRetirableGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(v *PutKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReEncryptInput(v *ReEncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if len(v.DestinationEncryptionAlgorithm) > 0 { + ok := object.Key("DestinationEncryptionAlgorithm") + ok.String(string(v.DestinationEncryptionAlgorithm)) + } + + if v.DestinationEncryptionContext != nil { + ok := object.Key("DestinationEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.DestinationEncryptionContext, ok); err != nil { + return err + } + } + + if v.DestinationKeyId != nil { + ok := object.Key("DestinationKeyId") + ok.String(*v.DestinationKeyId) + } + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if len(v.SourceEncryptionAlgorithm) > 0 { + ok := object.Key("SourceEncryptionAlgorithm") + ok.String(string(v.SourceEncryptionAlgorithm)) + } + + if v.SourceEncryptionContext != nil { + ok := object.Key("SourceEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.SourceEncryptionContext, ok); err != nil { + return err + } + } + + if v.SourceKeyId != nil { + ok := object.Key("SourceKeyId") + ok.String(*v.SourceKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReplicateKeyInput(v *ReplicateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != 
nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.ReplicaRegion != nil { + ok := object.Key("ReplicaRegion") + ok.String(*v.ReplicaRegion) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRetireGrantInput(v *RetireGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.GrantToken != nil { + ok := object.Key("GrantToken") + ok.String(*v.GrantToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRevokeGrantInput(v *RevokeGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRotateKeyOnDemandInput(v *RotateKeyOnDemandInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PendingWindowInDays != nil { + ok := object.Key("PendingWindowInDays") + ok.Integer(*v.PendingWindowInDays) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSignInput(v *SignInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := 
awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateAliasInput(v *UpdateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(v *UpdateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.NewCustomKeyStoreName != nil { + ok := object.Key("NewCustomKeyStoreName") + ok.String(*v.NewCustomKeyStoreName) + } + + if v.XksProxyAuthenticationCredential != nil { + ok := object.Key("XksProxyAuthenticationCredential") + if err := awsAwsjson11_serializeDocumentXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential, ok); err != nil { + return err + } + } + + if len(v.XksProxyConnectivity) > 0 { + ok := object.Key("XksProxyConnectivity") + ok.String(string(v.XksProxyConnectivity)) + } + + if v.XksProxyUriEndpoint != nil { + ok := object.Key("XksProxyUriEndpoint") + ok.String(*v.XksProxyUriEndpoint) + } + + if v.XksProxyUriPath != nil { + ok := object.Key("XksProxyUriPath") + ok.String(*v.XksProxyUriPath) + } + + if v.XksProxyVpcEndpointServiceName != nil { + ok := object.Key("XksProxyVpcEndpointServiceName") + ok.String(*v.XksProxyVpcEndpointServiceName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PrimaryRegion != nil { + ok := object.Key("PrimaryRegion") + ok.String(*v.PrimaryRegion) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyInput(v *VerifyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if v.Signature != nil { + ok := object.Key("Signature") + ok.Base64EncodeBytes(v.Signature) + } + + if len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + 
+ return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyMacInput(v *VerifyMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DryRun != nil { + ok := object.Key("DryRun") + ok.Boolean(*v.DryRun) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Mac != nil { + ok := object.Key("Mac") + ok.Base64EncodeBytes(v.Mac) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go new file mode 100644 index 000000000..9c111656d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go @@ -0,0 +1,635 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AlgorithmSpec string + +// Enum values for AlgorithmSpec +const ( + AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpecRsaAesKeyWrapSha1 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_1" + AlgorithmSpecRsaAesKeyWrapSha256 AlgorithmSpec = "RSA_AES_KEY_WRAP_SHA_256" + AlgorithmSpecSm2pke AlgorithmSpec = "SM2PKE" +) + +// Values returns all known values for AlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (AlgorithmSpec) Values() []AlgorithmSpec { + return []AlgorithmSpec{ + "RSAES_PKCS1_V1_5", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + "RSA_AES_KEY_WRAP_SHA_1", + "RSA_AES_KEY_WRAP_SHA_256", + "SM2PKE", + } +} + +type ConnectionErrorCodeType string + +// Enum values for ConnectionErrorCodeType +const ( + ConnectionErrorCodeTypeInvalidCredentials ConnectionErrorCodeType = "INVALID_CREDENTIALS" + ConnectionErrorCodeTypeClusterNotFound ConnectionErrorCodeType = "CLUSTER_NOT_FOUND" + ConnectionErrorCodeTypeNetworkErrors ConnectionErrorCodeType = "NETWORK_ERRORS" + ConnectionErrorCodeTypeInternalError ConnectionErrorCodeType = "INTERNAL_ERROR" + ConnectionErrorCodeTypeInsufficientCloudhsmHsms ConnectionErrorCodeType = "INSUFFICIENT_CLOUDHSM_HSMS" + ConnectionErrorCodeTypeUserLockedOut ConnectionErrorCodeType = "USER_LOCKED_OUT" + ConnectionErrorCodeTypeUserNotFound ConnectionErrorCodeType = "USER_NOT_FOUND" + ConnectionErrorCodeTypeUserLoggedIn ConnectionErrorCodeType = "USER_LOGGED_IN" + ConnectionErrorCodeTypeSubnetNotFound ConnectionErrorCodeType = "SUBNET_NOT_FOUND" + ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet ConnectionErrorCodeType = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" + ConnectionErrorCodeTypeXksProxyAccessDenied ConnectionErrorCodeType = "XKS_PROXY_ACCESS_DENIED" + ConnectionErrorCodeTypeXksProxyNotReachable ConnectionErrorCodeType = "XKS_PROXY_NOT_REACHABLE" + ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound ConnectionErrorCodeType = "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND" + ConnectionErrorCodeTypeXksProxyInvalidResponse ConnectionErrorCodeType = "XKS_PROXY_INVALID_RESPONSE" + ConnectionErrorCodeTypeXksProxyInvalidConfiguration ConnectionErrorCodeType = "XKS_PROXY_INVALID_CONFIGURATION" + ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration ConnectionErrorCodeType = "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION" + ConnectionErrorCodeTypeXksProxyTimedOut ConnectionErrorCodeType = "XKS_PROXY_TIMED_OUT" + ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration ConnectionErrorCodeType = "XKS_PROXY_INVALID_TLS_CONFIGURATION" +) + +// Values returns all known values for ConnectionErrorCodeType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionErrorCodeType) Values() []ConnectionErrorCodeType { + return []ConnectionErrorCodeType{ + "INVALID_CREDENTIALS", + "CLUSTER_NOT_FOUND", + "NETWORK_ERRORS", + "INTERNAL_ERROR", + "INSUFFICIENT_CLOUDHSM_HSMS", + "USER_LOCKED_OUT", + "USER_NOT_FOUND", + "USER_LOGGED_IN", + "SUBNET_NOT_FOUND", + "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", + "XKS_PROXY_ACCESS_DENIED", + "XKS_PROXY_NOT_REACHABLE", + "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND", + "XKS_PROXY_INVALID_RESPONSE", + "XKS_PROXY_INVALID_CONFIGURATION", + "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION", + "XKS_PROXY_TIMED_OUT", + "XKS_PROXY_INVALID_TLS_CONFIGURATION", + } +} + +type ConnectionStateType string + +// Enum values for ConnectionStateType +const ( + ConnectionStateTypeConnected ConnectionStateType = "CONNECTED" + ConnectionStateTypeConnecting ConnectionStateType = "CONNECTING" + ConnectionStateTypeFailed ConnectionStateType = "FAILED" + ConnectionStateTypeDisconnected ConnectionStateType = "DISCONNECTED" + ConnectionStateTypeDisconnecting ConnectionStateType = "DISCONNECTING" +) + +// Values returns all known values for ConnectionStateType. 
Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionStateType) Values() []ConnectionStateType { + return []ConnectionStateType{ + "CONNECTED", + "CONNECTING", + "FAILED", + "DISCONNECTED", + "DISCONNECTING", + } +} + +type CustomerMasterKeySpec string + +// Enum values for CustomerMasterKeySpec +const ( + CustomerMasterKeySpecRsa2048 CustomerMasterKeySpec = "RSA_2048" + CustomerMasterKeySpecRsa3072 CustomerMasterKeySpec = "RSA_3072" + CustomerMasterKeySpecRsa4096 CustomerMasterKeySpec = "RSA_4096" + CustomerMasterKeySpecEccNistP256 CustomerMasterKeySpec = "ECC_NIST_P256" + CustomerMasterKeySpecEccNistP384 CustomerMasterKeySpec = "ECC_NIST_P384" + CustomerMasterKeySpecEccNistP521 CustomerMasterKeySpec = "ECC_NIST_P521" + CustomerMasterKeySpecEccSecgP256k1 CustomerMasterKeySpec = "ECC_SECG_P256K1" + CustomerMasterKeySpecSymmetricDefault CustomerMasterKeySpec = "SYMMETRIC_DEFAULT" + CustomerMasterKeySpecHmac224 CustomerMasterKeySpec = "HMAC_224" + CustomerMasterKeySpecHmac256 CustomerMasterKeySpec = "HMAC_256" + CustomerMasterKeySpecHmac384 CustomerMasterKeySpec = "HMAC_384" + CustomerMasterKeySpecHmac512 CustomerMasterKeySpec = "HMAC_512" + CustomerMasterKeySpecSm2 CustomerMasterKeySpec = "SM2" +) + +// Values returns all known values for CustomerMasterKeySpec. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CustomerMasterKeySpec) Values() []CustomerMasterKeySpec { + return []CustomerMasterKeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type CustomKeyStoreType string + +// Enum values for CustomKeyStoreType +const ( + CustomKeyStoreTypeAwsCloudhsm CustomKeyStoreType = "AWS_CLOUDHSM" + CustomKeyStoreTypeExternalKeyStore CustomKeyStoreType = "EXTERNAL_KEY_STORE" +) + +// Values returns all known values for CustomKeyStoreType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CustomKeyStoreType) Values() []CustomKeyStoreType { + return []CustomKeyStoreType{ + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE", + } +} + +type DataKeyPairSpec string + +// Enum values for DataKeyPairSpec +const ( + DataKeyPairSpecRsa2048 DataKeyPairSpec = "RSA_2048" + DataKeyPairSpecRsa3072 DataKeyPairSpec = "RSA_3072" + DataKeyPairSpecRsa4096 DataKeyPairSpec = "RSA_4096" + DataKeyPairSpecEccNistP256 DataKeyPairSpec = "ECC_NIST_P256" + DataKeyPairSpecEccNistP384 DataKeyPairSpec = "ECC_NIST_P384" + DataKeyPairSpecEccNistP521 DataKeyPairSpec = "ECC_NIST_P521" + DataKeyPairSpecEccSecgP256k1 DataKeyPairSpec = "ECC_SECG_P256K1" + DataKeyPairSpecSm2 DataKeyPairSpec = "SM2" +) + +// Values returns all known values for DataKeyPairSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (DataKeyPairSpec) Values() []DataKeyPairSpec { + return []DataKeyPairSpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SM2", + } +} + +type DataKeySpec string + +// Enum values for DataKeySpec +const ( + DataKeySpecAes256 DataKeySpec = "AES_256" + DataKeySpecAes128 DataKeySpec = "AES_128" +) + +// Values returns all known values for DataKeySpec. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataKeySpec) Values() []DataKeySpec { + return []DataKeySpec{ + "AES_256", + "AES_128", + } +} + +type EncryptionAlgorithmSpec string + +// Enum values for EncryptionAlgorithmSpec +const ( + EncryptionAlgorithmSpecSymmetricDefault EncryptionAlgorithmSpec = "SYMMETRIC_DEFAULT" + EncryptionAlgorithmSpecRsaesOaepSha1 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_1" + EncryptionAlgorithmSpecRsaesOaepSha256 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_256" + EncryptionAlgorithmSpecSm2pke EncryptionAlgorithmSpec = "SM2PKE" +) + +// Values returns all known values for EncryptionAlgorithmSpec. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EncryptionAlgorithmSpec) Values() []EncryptionAlgorithmSpec { + return []EncryptionAlgorithmSpec{ + "SYMMETRIC_DEFAULT", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + "SM2PKE", + } +} + +type ExpirationModelType string + +// Enum values for ExpirationModelType +const ( + ExpirationModelTypeKeyMaterialExpires ExpirationModelType = "KEY_MATERIAL_EXPIRES" + ExpirationModelTypeKeyMaterialDoesNotExpire ExpirationModelType = "KEY_MATERIAL_DOES_NOT_EXPIRE" +) + +// Values returns all known values for ExpirationModelType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationModelType) Values() []ExpirationModelType { + return []ExpirationModelType{ + "KEY_MATERIAL_EXPIRES", + "KEY_MATERIAL_DOES_NOT_EXPIRE", + } +} + +type GrantOperation string + +// Enum values for GrantOperation +const ( + GrantOperationDecrypt GrantOperation = "Decrypt" + GrantOperationEncrypt GrantOperation = "Encrypt" + GrantOperationGenerateDataKey GrantOperation = "GenerateDataKey" + GrantOperationGenerateDataKeyWithoutPlaintext GrantOperation = "GenerateDataKeyWithoutPlaintext" + GrantOperationReEncryptFrom GrantOperation = "ReEncryptFrom" + GrantOperationReEncryptTo GrantOperation = "ReEncryptTo" + GrantOperationSign GrantOperation = "Sign" + GrantOperationVerify GrantOperation = "Verify" + GrantOperationGetPublicKey GrantOperation = "GetPublicKey" + GrantOperationCreateGrant GrantOperation = "CreateGrant" + GrantOperationRetireGrant GrantOperation = "RetireGrant" + GrantOperationDescribeKey GrantOperation = "DescribeKey" + GrantOperationGenerateDataKeyPair GrantOperation = "GenerateDataKeyPair" + GrantOperationGenerateDataKeyPairWithoutPlaintext GrantOperation = "GenerateDataKeyPairWithoutPlaintext" + GrantOperationGenerateMac GrantOperation = "GenerateMac" + GrantOperationVerifyMac GrantOperation = "VerifyMac" + GrantOperationDeriveSharedSecret GrantOperation = "DeriveSharedSecret" +) + +// Values returns all known values for GrantOperation. 
Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (GrantOperation) Values() []GrantOperation { + return []GrantOperation{ + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "Sign", + "Verify", + "GetPublicKey", + "CreateGrant", + "RetireGrant", + "DescribeKey", + "GenerateDataKeyPair", + "GenerateDataKeyPairWithoutPlaintext", + "GenerateMac", + "VerifyMac", + "DeriveSharedSecret", + } +} + +type KeyAgreementAlgorithmSpec string + +// Enum values for KeyAgreementAlgorithmSpec +const ( + KeyAgreementAlgorithmSpecEcdh KeyAgreementAlgorithmSpec = "ECDH" +) + +// Values returns all known values for KeyAgreementAlgorithmSpec. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyAgreementAlgorithmSpec) Values() []KeyAgreementAlgorithmSpec { + return []KeyAgreementAlgorithmSpec{ + "ECDH", + } +} + +type KeyEncryptionMechanism string + +// Enum values for KeyEncryptionMechanism +const ( + KeyEncryptionMechanismRsaesOaepSha256 KeyEncryptionMechanism = "RSAES_OAEP_SHA_256" +) + +// Values returns all known values for KeyEncryptionMechanism. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyEncryptionMechanism) Values() []KeyEncryptionMechanism { + return []KeyEncryptionMechanism{ + "RSAES_OAEP_SHA_256", + } +} + +type KeyManagerType string + +// Enum values for KeyManagerType +const ( + KeyManagerTypeAws KeyManagerType = "AWS" + KeyManagerTypeCustomer KeyManagerType = "CUSTOMER" +) + +// Values returns all known values for KeyManagerType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyManagerType) Values() []KeyManagerType { + return []KeyManagerType{ + "AWS", + "CUSTOMER", + } +} + +type KeySpec string + +// Enum values for KeySpec +const ( + KeySpecRsa2048 KeySpec = "RSA_2048" + KeySpecRsa3072 KeySpec = "RSA_3072" + KeySpecRsa4096 KeySpec = "RSA_4096" + KeySpecEccNistP256 KeySpec = "ECC_NIST_P256" + KeySpecEccNistP384 KeySpec = "ECC_NIST_P384" + KeySpecEccNistP521 KeySpec = "ECC_NIST_P521" + KeySpecEccSecgP256k1 KeySpec = "ECC_SECG_P256K1" + KeySpecSymmetricDefault KeySpec = "SYMMETRIC_DEFAULT" + KeySpecHmac224 KeySpec = "HMAC_224" + KeySpecHmac256 KeySpec = "HMAC_256" + KeySpecHmac384 KeySpec = "HMAC_384" + KeySpecHmac512 KeySpec = "HMAC_512" + KeySpecSm2 KeySpec = "SM2" +) + +// Values returns all known values for KeySpec. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (KeySpec) Values() []KeySpec { + return []KeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type KeyState string + +// Enum values for KeyState +const ( + KeyStateCreating KeyState = "Creating" + KeyStateEnabled KeyState = "Enabled" + KeyStateDisabled KeyState = "Disabled" + KeyStatePendingDeletion KeyState = "PendingDeletion" + KeyStatePendingImport KeyState = "PendingImport" + KeyStatePendingReplicaDeletion KeyState = "PendingReplicaDeletion" + KeyStateUnavailable KeyState = "Unavailable" + KeyStateUpdating KeyState = "Updating" +) + +// Values returns all known values for KeyState. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyState) Values() []KeyState { + return []KeyState{ + "Creating", + "Enabled", + "Disabled", + "PendingDeletion", + "PendingImport", + "PendingReplicaDeletion", + "Unavailable", + "Updating", + } +} + +type KeyUsageType string + +// Enum values for KeyUsageType +const ( + KeyUsageTypeSignVerify KeyUsageType = "SIGN_VERIFY" + KeyUsageTypeEncryptDecrypt KeyUsageType = "ENCRYPT_DECRYPT" + KeyUsageTypeGenerateVerifyMac KeyUsageType = "GENERATE_VERIFY_MAC" + KeyUsageTypeKeyAgreement KeyUsageType = "KEY_AGREEMENT" +) + +// Values returns all known values for KeyUsageType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyUsageType) Values() []KeyUsageType { + return []KeyUsageType{ + "SIGN_VERIFY", + "ENCRYPT_DECRYPT", + "GENERATE_VERIFY_MAC", + "KEY_AGREEMENT", + } +} + +type MacAlgorithmSpec string + +// Enum values for MacAlgorithmSpec +const ( + MacAlgorithmSpecHmacSha224 MacAlgorithmSpec = "HMAC_SHA_224" + MacAlgorithmSpecHmacSha256 MacAlgorithmSpec = "HMAC_SHA_256" + MacAlgorithmSpecHmacSha384 MacAlgorithmSpec = "HMAC_SHA_384" + MacAlgorithmSpecHmacSha512 MacAlgorithmSpec = "HMAC_SHA_512" +) + +// Values returns all known values for MacAlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (MacAlgorithmSpec) Values() []MacAlgorithmSpec { + return []MacAlgorithmSpec{ + "HMAC_SHA_224", + "HMAC_SHA_256", + "HMAC_SHA_384", + "HMAC_SHA_512", + } +} + +type MessageType string + +// Enum values for MessageType +const ( + MessageTypeRaw MessageType = "RAW" + MessageTypeDigest MessageType = "DIGEST" +) + +// Values returns all known values for MessageType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (MessageType) Values() []MessageType { + return []MessageType{ + "RAW", + "DIGEST", + } +} + +type MultiRegionKeyType string + +// Enum values for MultiRegionKeyType +const ( + MultiRegionKeyTypePrimary MultiRegionKeyType = "PRIMARY" + MultiRegionKeyTypeReplica MultiRegionKeyType = "REPLICA" +) + +// Values returns all known values for MultiRegionKeyType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (MultiRegionKeyType) Values() []MultiRegionKeyType { + return []MultiRegionKeyType{ + "PRIMARY", + "REPLICA", + } +} + +type OriginType string + +// Enum values for OriginType +const ( + OriginTypeAwsKms OriginType = "AWS_KMS" + OriginTypeExternal OriginType = "EXTERNAL" + OriginTypeAwsCloudhsm OriginType = "AWS_CLOUDHSM" + OriginTypeExternalKeyStore OriginType = "EXTERNAL_KEY_STORE" +) + +// Values returns all known values for OriginType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (OriginType) Values() []OriginType { + return []OriginType{ + "AWS_KMS", + "EXTERNAL", + "AWS_CLOUDHSM", + "EXTERNAL_KEY_STORE", + } +} + +type RotationType string + +// Enum values for RotationType +const ( + RotationTypeAutomatic RotationType = "AUTOMATIC" + RotationTypeOnDemand RotationType = "ON_DEMAND" +) + +// Values returns all known values for RotationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (RotationType) Values() []RotationType { + return []RotationType{ + "AUTOMATIC", + "ON_DEMAND", + } +} + +type SigningAlgorithmSpec string + +// Enum values for SigningAlgorithmSpec +const ( + SigningAlgorithmSpecRsassaPssSha256 SigningAlgorithmSpec = "RSASSA_PSS_SHA_256" + SigningAlgorithmSpecRsassaPssSha384 SigningAlgorithmSpec = "RSASSA_PSS_SHA_384" + SigningAlgorithmSpecRsassaPssSha512 SigningAlgorithmSpec = "RSASSA_PSS_SHA_512" + SigningAlgorithmSpecRsassaPkcs1V15Sha256 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_256" + SigningAlgorithmSpecRsassaPkcs1V15Sha384 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_384" + SigningAlgorithmSpecRsassaPkcs1V15Sha512 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_512" + SigningAlgorithmSpecEcdsaSha256 SigningAlgorithmSpec = "ECDSA_SHA_256" + SigningAlgorithmSpecEcdsaSha384 SigningAlgorithmSpec = "ECDSA_SHA_384" + SigningAlgorithmSpecEcdsaSha512 SigningAlgorithmSpec = "ECDSA_SHA_512" + SigningAlgorithmSpecSm2dsa SigningAlgorithmSpec = "SM2DSA" +) + +// Values returns all known values for SigningAlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SigningAlgorithmSpec) Values() []SigningAlgorithmSpec { + return []SigningAlgorithmSpec{ + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "ECDSA_SHA_256", + "ECDSA_SHA_384", + "ECDSA_SHA_512", + "SM2DSA", + } +} + +type WrappingKeySpec string + +// Enum values for WrappingKeySpec +const ( + WrappingKeySpecRsa2048 WrappingKeySpec = "RSA_2048" + WrappingKeySpecRsa3072 WrappingKeySpec = "RSA_3072" + WrappingKeySpecRsa4096 WrappingKeySpec = "RSA_4096" + WrappingKeySpecSm2 WrappingKeySpec = "SM2" +) + +// Values returns all known values for WrappingKeySpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (WrappingKeySpec) Values() []WrappingKeySpec { + return []WrappingKeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "SM2", + } +} + +type XksProxyConnectivityType string + +// Enum values for XksProxyConnectivityType +const ( + XksProxyConnectivityTypePublicEndpoint XksProxyConnectivityType = "PUBLIC_ENDPOINT" + XksProxyConnectivityTypeVpcEndpointService XksProxyConnectivityType = "VPC_ENDPOINT_SERVICE" +) + +// Values returns all known values for XksProxyConnectivityType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (XksProxyConnectivityType) Values() []XksProxyConnectivityType { + return []XksProxyConnectivityType{ + "PUBLIC_ENDPOINT", + "VPC_ENDPOINT_SERVICE", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go new file mode 100644 index 000000000..a68d1739e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go @@ -0,0 +1,1474 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The request was rejected because it attempted to create a resource that already +// exists. +type AlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *AlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster is already +// associated with an CloudHSM key store in the account, or it shares a backup +// history with an CloudHSM key store in the account. Each CloudHSM key store in +// the account must be associated with a different CloudHSM cluster. +// +// CloudHSM clusters that share a backup history have the same cluster +// certificate. To view the cluster certificate of an CloudHSM cluster, use the [DescribeClusters] +// operation. +// +// [DescribeClusters]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html +type CloudHsmClusterInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CloudHsmClusterInUseException" + } + return *e.ErrorCodeOverride +} +func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the associated CloudHSM cluster did not meet +// the configuration requirements for an CloudHSM key store. +// +// - The CloudHSM cluster must be configured with private subnets in at least +// two different Availability Zones in the Region. 
+// +// - The [security group for the cluster](cloudhsm-cluster--sg) must include inbound rules and outbound rules +// that allow TCP traffic on ports 2223-2225. The Source in the inbound rules and +// the Destination in the outbound rules must match the security group ID. These +// rules are set by default when you create the CloudHSM cluster. Do not delete or +// change them. To get information about a particular security group, use the [DescribeSecurityGroups] +// operation. +// +// - The CloudHSM cluster must contain at least as many HSMs as the operation +// requires. To add HSMs, use the CloudHSM [CreateHsm]operation. +// +// For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKeyoperations, the CloudHSM cluster must have at least two active +// +// HSMs, each in a different Availability Zone. For the ConnectCustomKeyStoreoperation, the CloudHSM +// must contain at least one active HSM. +// +// For information about the requirements for an CloudHSM cluster that is +// associated with an CloudHSM key store, see [Assemble the Prerequisites]in the Key Management Service +// Developer Guide. For information about creating a private subnet for an CloudHSM +// cluster, see [Create a Private Subnet]in the CloudHSM User Guide. For information about cluster security +// groups, see [Configure a Default Security Group]in the CloudHSM User Guide . +// +// [Assemble the Prerequisites]: https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore +// [Create a Private Subnet]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html +// [Configure a Default Security Group]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html +// [DescribeSecurityGroups]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html +// [CreateHsm]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html +// [security group for the cluster]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html +type CloudHsmClusterInvalidConfigurationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CloudHsmClusterInvalidConfigurationException" + } + return *e.ErrorCodeOverride +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the CloudHSM cluster associated with the +// CloudHSM key store is not active. Initialize and activate the cluster and try +// the command again. For detailed instructions, see [Getting Started]in the CloudHSM User Guide. 
+// +// [Getting Started]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html +type CloudHsmClusterNotActiveException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotActiveException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotActiveException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotActiveException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CloudHsmClusterNotActiveException" + } + return *e.ErrorCodeOverride +} +func (e *CloudHsmClusterNotActiveException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because KMS cannot find the CloudHSM cluster with the +// specified cluster ID. Retry the request with a different cluster ID. +type CloudHsmClusterNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CloudHsmClusterNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *CloudHsmClusterNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster has a different +// cluster certificate than the original cluster. You cannot use the operation to +// specify an unrelated cluster for an CloudHSM key store. +// +// Specify an CloudHSM cluster that shares a backup history with the original +// cluster. This includes clusters that were created from a backup of the current +// cluster, and clusters that were created from the same backup that produced the +// current cluster. +// +// CloudHSM clusters that share a backup history have the same cluster +// certificate. To view the cluster certificate of an CloudHSM cluster, use the [DescribeClusters] +// operation. +// +// [DescribeClusters]: https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html +type CloudHsmClusterNotRelatedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotRelatedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotRelatedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotRelatedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CloudHsmClusterNotRelatedException" + } + return *e.ErrorCodeOverride +} +func (e *CloudHsmClusterNotRelatedException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because an automatic rotation of this key is currently +// in progress or scheduled to begin within the next 20 minutes. 
+type ConflictException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ConflictException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ConflictException" + } + return *e.ErrorCodeOverride +} +func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the custom key store contains KMS keys. After +// verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletionoperation to delete +// the KMS keys. After they are deleted, you can delete the custom key store. +type CustomKeyStoreHasCMKsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreHasCMKsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreHasCMKsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreHasCMKsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CustomKeyStoreHasCMKsException" + } + return *e.ErrorCodeOverride +} +func (e *CustomKeyStoreHasCMKsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because of the ConnectionState of the custom key +// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores operation. +// +// This exception is thrown under the following conditions: +// +// - You requested the ConnectCustomKeyStoreoperation on a custom key store with a ConnectionState of +// DISCONNECTING or FAILED . This operation is valid for all other +// ConnectionState values. To reconnect a custom key store in a FAILED state, +// disconnect it (DisconnectCustomKeyStore ), then connect it ( ConnectCustomKeyStore ). +// +// - You requested the CreateKeyoperation in a custom key store that is not connected. +// This operations is valid only when the custom key store ConnectionState is +// CONNECTED . +// +// - You requested the DisconnectCustomKeyStoreoperation on a custom key store with a ConnectionState of +// DISCONNECTING or DISCONNECTED . This operation is valid for all other +// ConnectionState values. +// +// - You requested the UpdateCustomKeyStoreor DeleteCustomKeyStoreoperation on a custom key store that is not +// disconnected. This operation is valid only when the custom key store +// ConnectionState is DISCONNECTED . +// +// - You requested the GenerateRandomoperation in an CloudHSM key store that is not connected. +// This operation is valid only when the CloudHSM key store ConnectionState is +// CONNECTED . 
+type CustomKeyStoreInvalidStateException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreInvalidStateException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreInvalidStateException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreInvalidStateException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CustomKeyStoreInvalidStateException" + } + return *e.ErrorCodeOverride +} +func (e *CustomKeyStoreInvalidStateException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the specified custom key store name is already +// assigned to another custom key store in the account. Try again with a custom key +// store name that is unique in the account. +type CustomKeyStoreNameInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreNameInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreNameInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreNameInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CustomKeyStoreNameInUseException" + } + return *e.ErrorCodeOverride +} +func (e *CustomKeyStoreNameInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because KMS cannot find a custom key store with the +// specified key store name or ID. +type CustomKeyStoreNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "CustomKeyStoreNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *CustomKeyStoreNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The system timed out while trying to fulfill the request. You can retry the +// request. +type DependencyTimeoutException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *DependencyTimeoutException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DependencyTimeoutException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DependencyTimeoutException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "DependencyTimeoutException" + } + return *e.ErrorCodeOverride +} +func (e *DependencyTimeoutException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because the specified KMS key is not enabled. 
+type DisabledException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *DisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DisabledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "DisabledException" + } + return *e.ErrorCodeOverride +} +func (e *DisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the DryRun parameter was specified. +type DryRunOperationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *DryRunOperationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DryRunOperationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DryRunOperationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "DryRunOperationException" + } + return *e.ErrorCodeOverride +} +func (e *DryRunOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified import token is expired. Use GetParametersForImport to +// get a new import token and public key, use the new public key to encrypt the key +// material, and then try the request again. +type ExpiredImportTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExpiredImportTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredImportTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredImportTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredImportTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified KMS key cannot decrypt the data. +// The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must identify the +// same KMS key that was used to encrypt the ciphertext. +type IncorrectKeyException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IncorrectKeyException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectKeyException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectKeyException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IncorrectKeyException" + } + return *e.ErrorCodeOverride +} +func (e *IncorrectKeyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the key material in the request is, expired, +// invalid, or is not the same key material that was previously imported into this +// KMS key. 
+type IncorrectKeyMaterialException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IncorrectKeyMaterialException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectKeyMaterialException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectKeyMaterialException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IncorrectKeyMaterialException" + } + return *e.ErrorCodeOverride +} +func (e *IncorrectKeyMaterialException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the trust anchor certificate in the request to +// create an CloudHSM key store is not the trust anchor certificate for the +// specified CloudHSM cluster. +// +// When you [initialize the CloudHSM cluster], you create the trust anchor certificate and save it in the +// customerCA.crt file. +// +// [initialize the CloudHSM cluster]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr +type IncorrectTrustAnchorException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IncorrectTrustAnchorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectTrustAnchorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectTrustAnchorException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IncorrectTrustAnchorException" + } + return *e.ErrorCodeOverride +} +func (e *IncorrectTrustAnchorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified alias name is not valid. +type InvalidAliasNameException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidAliasNameException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAliasNameException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAliasNameException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidAliasNameException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidAliasNameException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a specified ARN, or an ARN in a key policy, is +// not valid. +type InvalidArnException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidArnException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidArnException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidArnException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidArnException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidArnException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// From the Decrypt or ReEncrypt operation, the request was rejected because the specified +// ciphertext, or additional authenticated data incorporated into the ciphertext, +// such as the encryption context, is corrupted, missing, or otherwise invalid. 
+// +// From the ImportKeyMaterial operation, the request was rejected because KMS could not decrypt the +// encrypted (wrapped) key material. +type InvalidCiphertextException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidCiphertextException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidCiphertextException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidCiphertextException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidCiphertextException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidCiphertextException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified GrantId is not valid. +type InvalidGrantIdException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantIdException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantIdException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantIdException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantIdException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantIdException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified grant token is not valid. +type InvalidGrantTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantTokenException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the provided import token is invalid or is +// associated with a different KMS key. +type InvalidImportTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidImportTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidImportTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidImportTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidImportTokenException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected for one of the following reasons: +// +// - The KeyUsage value of the KMS key is incompatible with the API operation. +// +// - The encryption algorithm or signing algorithm specified for the operation +// is incompatible with the type of key material in the KMS key (KeySpec ). +// +// For encrypting, decrypting, re-encrypting, and generating data keys, the +// KeyUsage must be ENCRYPT_DECRYPT . For signing and verifying messages, the +// KeyUsage must be SIGN_VERIFY . 
For generating and verifying message +// authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC . For +// deriving key agreement secrets, the KeyUsage must be KEY_AGREEMENT . To find the +// KeyUsage of a KMS key, use the DescribeKey operation. +// +// To find the encryption or signing algorithms supported for a particular KMS +// key, use the DescribeKeyoperation. +type InvalidKeyUsageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidKeyUsageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidKeyUsageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidKeyUsageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidKeyUsageException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidKeyUsageException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the marker that specifies where pagination +// should next begin is not valid. +type InvalidMarkerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidMarkerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidMarkerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidMarkerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidMarkerException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidMarkerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified KMS key was not available. You +// can retry the request. +type KeyUnavailableException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *KeyUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KeyUnavailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KeyUnavailableException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KeyUnavailableException" + } + return *e.ErrorCodeOverride +} +func (e *KeyUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because an internal exception occurred. The request +// can be retried. +type KMSInternalException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *KMSInternalException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInternalException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInternalException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KMSInternalException" + } + return *e.ErrorCodeOverride +} +func (e *KMSInternalException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because the HMAC verification failed. HMAC +// verification fails when the HMAC computed by using the specified message, HMAC +// KMS key, and MAC algorithm does not match the HMAC specified in the request. 
+type KMSInvalidMacException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidMacException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidMacException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidMacException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KMSInvalidMacException" + } + return *e.ErrorCodeOverride +} +func (e *KMSInvalidMacException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the signature verification failed. Signature +// verification fails when it cannot confirm that signature was produced by signing +// the specified message with the specified KMS key and signing algorithm. +type KMSInvalidSignatureException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidSignatureException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidSignatureException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidSignatureException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KMSInvalidSignatureException" + } + return *e.ErrorCodeOverride +} +func (e *KMSInvalidSignatureException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the state of the specified resource is not +// valid for this request. +// +// This exceptions means one of the following: +// +// - The key state of the KMS key is not compatible with the operation. +// +// To find the key state, use the DescribeKeyoperation. For more information about which key +// +// states are compatible with each KMS operation, see [Key states of KMS keys]in the Key Management +// Service Developer Guide . +// +// - For cryptographic operations on KMS keys in custom key stores, this +// exception represents a general failure with many possible causes. To identify +// the cause, see the error message that accompanies the exception. +// +// [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html +type KMSInvalidStateException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidStateException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidStateException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidStateException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KMSInvalidStateException" + } + return *e.ErrorCodeOverride +} +func (e *KMSInvalidStateException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a quota was exceeded. For more information, +// see [Quotas]in the Key Management Service Developer Guide. 
+// +// [Quotas]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html +type LimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified policy is not syntactically or +// semantically correct. +type MalformedPolicyDocumentException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MalformedPolicyDocumentException" + } + return *e.ErrorCodeOverride +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified entity or resource could not be +// found. +type NotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *NotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because one or more tags are not valid. +type TagException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TagException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TagException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TagException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TagException" + } + return *e.ErrorCodeOverride +} +func (e *TagException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a specified parameter is not supported or a +// specified resource is not valid for this operation. 
+type UnsupportedOperationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedOperationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedOperationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedOperationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedOperationException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the ( XksKeyId ) is already associated with +// another KMS key in this external key store. Each KMS key in an external key +// store must be associated with a different external key. +type XksKeyAlreadyInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksKeyAlreadyInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksKeyAlreadyInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksKeyAlreadyInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksKeyAlreadyInUseException" + } + return *e.ErrorCodeOverride +} +func (e *XksKeyAlreadyInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the external key specified by the XksKeyId +// parameter did not meet the configuration requirements for an external key store. +// +// The external key must be an AES-256 symmetric key that is enabled and performs +// encryption and decryption. +type XksKeyInvalidConfigurationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksKeyInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksKeyInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksKeyInvalidConfigurationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksKeyInvalidConfigurationException" + } + return *e.ErrorCodeOverride +} +func (e *XksKeyInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the external key store proxy could not find +// the external key. This exception is thrown when the value of the XksKeyId +// parameter doesn't identify a key in the external key manager associated with the +// external key proxy. +// +// Verify that the XksKeyId represents an existing key in the external key +// manager. Use the key identifier that the external key store proxy uses to +// identify the key. For details, see the documentation provided with your external +// key store proxy or key manager. 
+type XksKeyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksKeyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksKeyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksKeyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksKeyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *XksKeyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the proxy credentials failed to authenticate +// to the specified external key store proxy. The specified external key store +// proxy rejected a status request from KMS due to invalid credentials. This can +// indicate an error in the credentials or in the identification of the external +// key store proxy. +type XksProxyIncorrectAuthenticationCredentialException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyIncorrectAuthenticationCredentialException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyIncorrectAuthenticationCredentialException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyIncorrectAuthenticationCredentialException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the external key store proxy is not configured +// correctly. To identify the cause, see the error message that accompanies the +// exception. +type XksProxyInvalidConfigurationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyInvalidConfigurationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyInvalidConfigurationException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// KMS cannot interpret the response it received from the external key store +// proxy. The problem might be a poorly constructed response, but it could also be +// a transient network issue. If you see this error repeatedly, report it to the +// proxy vendor. 
+type XksProxyInvalidResponseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyInvalidResponseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyInvalidResponseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyInvalidResponseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyInvalidResponseException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyInvalidResponseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the XksProxyUriEndpoint is already associated +// with another external key store in this Amazon Web Services Region. To identify +// the cause, see the error message that accompanies the exception. +type XksProxyUriEndpointInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyUriEndpointInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyUriEndpointInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyUriEndpointInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyUriEndpointInUseException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyUriEndpointInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the concatenation of the XksProxyUriEndpoint +// and XksProxyUriPath is already associated with another external key store in +// this Amazon Web Services Region. Each external key store in a Region must use a +// unique external key store proxy API address. +type XksProxyUriInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyUriInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyUriInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyUriInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyUriInUseException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyUriInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// KMS was unable to reach the specified XksProxyUriPath . The path must be +// reachable before you create the external key store or update its settings. +// +// This exception is also thrown when the external key store proxy response to a +// GetHealthStatus request indicates that all external key manager instances are +// unavailable. 
+type XksProxyUriUnreachableException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyUriUnreachableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyUriUnreachableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyUriUnreachableException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyUriUnreachableException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyUriUnreachableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified Amazon VPC endpoint service is +// already associated with another external key store in this Amazon Web Services +// Region. Each external key store in a Region must use a different Amazon VPC +// endpoint service. +type XksProxyVpcEndpointServiceInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyVpcEndpointServiceInUseException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyVpcEndpointServiceInUseException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the Amazon VPC endpoint service configuration +// does not fulfill the requirements for an external key store. To identify the +// cause, see the error message that accompanies the exception and [review the requirements]for Amazon VPC +// endpoint service connectivity for an external key store. +// +// [review the requirements]: https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements +type XksProxyVpcEndpointServiceInvalidConfigurationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyVpcEndpointServiceInvalidConfigurationException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyVpcEndpointServiceInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because KMS could not find the specified VPC endpoint +// service. Use DescribeCustomKeyStoresto verify the VPC endpoint service name for the external key +// store. Also, confirm that the Allow principals list for the VPC endpoint +// service includes the KMS service principal for the Region, such as +// cks.kms.us-east-1.amazonaws.com . 
+type XksProxyVpcEndpointServiceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *XksProxyVpcEndpointServiceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "XksProxyVpcEndpointServiceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *XksProxyVpcEndpointServiceNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go new file mode 100644 index 000000000..62a8f7f8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go @@ -0,0 +1,691 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Contains information about an alias. +type AliasListEntry struct { + + // String that contains the key ARN. + AliasArn *string + + // String that contains the alias. This value begins with alias/ . + AliasName *string + + // Date and time that the alias was most recently created in the account and + // Region. Formatted as Unix time. + CreationDate *time.Time + + // Date and time that the alias was most recently associated with a KMS key in the + // account and Region. Formatted as Unix time. + LastUpdatedDate *time.Time + + // String that contains the key identifier of the KMS key associated with the + // alias. + TargetKeyId *string + + noSmithyDocumentSerde +} + +// Contains information about each custom key store in the custom key store list. +type CustomKeyStoresListEntry struct { + + // A unique identifier for the CloudHSM cluster that is associated with an + // CloudHSM key store. This field appears only when the CustomKeyStoreType is + // AWS_CLOUDHSM . + CloudHsmClusterId *string + + // Describes the connection error. This field appears in the response only when + // the ConnectionState is FAILED . + // + // Many failures can be resolved by updating the properties of the custom key + // store. To update a custom key store, disconnect it (DisconnectCustomKeyStore ), correct the errors (UpdateCustomKeyStore ), + // and try to connect again (ConnectCustomKeyStore ). For additional help resolving these errors, see [How to Fix a Connection Failure] + // in Key Management Service Developer Guide. + // + // All custom key stores: + // + // - INTERNAL_ERROR — KMS could not complete the request due to an internal + // error. Retry the request. For ConnectCustomKeyStore requests, disconnect the + // custom key store before trying to connect again. + // + // - NETWORK_ERRORS — Network errors are preventing KMS from connecting the + // custom key store to its backing key store. + // + // CloudHSM key stores: + // + // - CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM cluster with the specified + // cluster ID. + // + // - INSUFFICIENT_CLOUDHSM_HSMS — The associated CloudHSM cluster does not + // contain any active HSMs. To connect a custom key store to its CloudHSM cluster, + // the cluster must contain at least one active HSM. 
+ // + // - INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private subnet + // associated with the CloudHSM cluster doesn't have any available IP addresses. A + // CloudHSM key store connection requires one free IP address in each of the + // associated private subnets, although two are preferable. For details, see [How to Fix a Connection Failure]in + // the Key Management Service Developer Guide. + // + // - INVALID_CREDENTIALS — The KeyStorePassword for the custom key store doesn't + // match the current password of the kmsuser crypto user in the CloudHSM cluster. + // Before you can connect your custom key store to its CloudHSM cluster, you must + // change the kmsuser account password and update the KeyStorePassword value for + // the custom key store. + // + // - SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration was + // deleted. If KMS cannot find all of the subnets in the cluster configuration, + // attempts to connect the custom key store to the CloudHSM cluster fail. To fix + // this error, create a cluster from a recent backup and associate it with your + // custom key store. (This process creates a new cluster configuration with a VPC + // and private subnets.) For details, see [How to Fix a Connection Failure]in the Key Management Service + // Developer Guide. + // + // - USER_LOCKED_OUT — The kmsuser CU account is locked out of the associated + // CloudHSM cluster due to too many failed password attempts. Before you can + // connect your custom key store to its CloudHSM cluster, you must change the + // kmsuser account password and update the key store password value for the + // custom key store. + // + // - USER_LOGGED_IN — The kmsuser CU account is logged into the associated + // CloudHSM cluster. This prevents KMS from rotating the kmsuser account password + // and logging into the cluster. Before you can connect your custom key store to + // its CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you + // changed the kmsuser password to log into the cluster, you must also and update + // the key store password value for the custom key store. For help, see [How to Log Out and Reconnect]in the + // Key Management Service Developer Guide. + // + // - USER_NOT_FOUND — KMS cannot find a kmsuser CU account in the associated + // CloudHSM cluster. Before you can connect your custom key store to its CloudHSM + // cluster, you must create a kmsuser CU account in the cluster, and then update + // the key store password value for the custom key store. + // + // External key stores: + // + // - INVALID_CREDENTIALS — One or both of the XksProxyAuthenticationCredential + // values is not valid on the specified external key store proxy. + // + // - XKS_PROXY_ACCESS_DENIED — KMS requests are denied access to the external key + // store proxy. If the external key store proxy has authorization rules, verify + // that they permit KMS to communicate with the proxy on your behalf. + // + // - XKS_PROXY_INVALID_CONFIGURATION — A configuration error is preventing the + // external key store from connecting to its proxy. Verify the value of the + // XksProxyUriPath . + // + // - XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the response from the + // external key store proxy. If you see this connection error code repeatedly, + // notify your external key store proxy vendor. + // + // - XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external key + // store proxy because the TLS configuration is invalid. 
Verify that the XKS proxy + // supports TLS 1.2 or 1.3. Also, verify that the TLS certificate is not expired, + // and that it matches the hostname in the XksProxyUriEndpoint value, and that it + // is signed by a certificate authority included in the [Trusted Certificate Authorities]list. + // + // - XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external key store + // proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath are correct. + // Use the tools for your external key store proxy to verify that the proxy is + // active and available on its network. Also, verify that your external key manager + // instances are operating properly. Connection attempts fail with this connection + // error code if the proxy reports that all external key manager instances are + // unavailable. + // + // - XKS_PROXY_TIMED_OUT — KMS can connect to the external key store proxy, but + // the proxy does not respond to KMS in the time allotted. If you see this + // connection error code repeatedly, notify your external key store proxy vendor. + // + // - XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The Amazon VPC endpoint + // service configuration doesn't conform to the requirements for an KMS external + // key store. + // + // - The VPC endpoint service must be an endpoint service for interface + // endpoints in the caller's Amazon Web Services account. + // + // - It must have a network load balancer (NLB) connected to at least two + // subnets, each in a different Availability Zone. + // + // - The Allow principals list must include the KMS service principal for the + // Region, cks.kms..amazonaws.com , such as cks.kms.us-east-1.amazonaws.com . + // + // - It must not require [acceptance]of connection requests. + // + // - It must have a private DNS name. The private DNS name for an external key + // store with VPC_ENDPOINT_SERVICE connectivity must be unique in its Amazon Web + // Services Region. + // + // - The domain of the private DNS name must have a [verification status]of verified . + // + // - The [TLS certificate]specifies the private DNS hostname at which the endpoint is reachable. + // + // - XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint service + // that it uses to communicate with the external key store proxy. Verify that the + // XksProxyVpcEndpointServiceName is correct and the KMS service principal has + // service consumer permissions on the Amazon VPC endpoint service. + // + // [acceptance]: https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html + // [verification status]: https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html + // [How to Log Out and Reconnect]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2 + // [TLS certificate]: https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html + // [Trusted Certificate Authorities]: https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities + // [How to Fix a Connection Failure]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed + ConnectionErrorCode ConnectionErrorCodeType + + // Indicates whether the custom key store is connected to its backing key store. + // For an CloudHSM key store, the ConnectionState indicates whether it is + // connected to its CloudHSM cluster. 
For an external key store, the + // ConnectionState indicates whether it is connected to the external key store + // proxy that communicates with your external key manager. + // + // You can create and use KMS keys in your custom key stores only when its + // ConnectionState is CONNECTED . + // + // The ConnectionState value is DISCONNECTED only if the key store has never been + // connected or you use the DisconnectCustomKeyStoreoperation to disconnect it. If the value is CONNECTED + // but you are having trouble using the custom key store, make sure that the + // backing key store is reachable and active. For an CloudHSM key store, verify + // that its associated CloudHSM cluster is active and contains at least one active + // HSM. For an external key store, verify that the external key store proxy and + // external key manager are connected and enabled. + // + // A value of FAILED indicates that an attempt to connect was unsuccessful. The + // ConnectionErrorCode field in the response indicates the cause of the failure. + // For help resolving a connection failure, see [Troubleshooting a custom key store]in the Key Management Service + // Developer Guide. + // + // [Troubleshooting a custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html + ConnectionState ConnectionStateType + + // The date and time when the custom key store was created. + CreationDate *time.Time + + // A unique identifier for the custom key store. + CustomKeyStoreId *string + + // The user-specified friendly name for the custom key store. + CustomKeyStoreName *string + + // Indicates the type of the custom key store. AWS_CLOUDHSM indicates a custom key + // store backed by an CloudHSM cluster. EXTERNAL_KEY_STORE indicates a custom key + // store backed by an external key store proxy and external key manager outside of + // Amazon Web Services. + CustomKeyStoreType CustomKeyStoreType + + // The trust anchor certificate of the CloudHSM cluster associated with an + // CloudHSM key store. When you [initialize the cluster], you create this certificate and save it in the + // customerCA.crt file. + // + // This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM . + // + // [initialize the cluster]: https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr + TrustAnchorCertificate *string + + // Configuration settings for the external key store proxy (XKS proxy). The + // external key store proxy translates KMS requests into a format that your + // external key manager can understand. The proxy configuration includes connection + // information that KMS requires. + // + // This field appears only when the CustomKeyStoreType is EXTERNAL_KEY_STORE . + XksProxyConfiguration *XksProxyConfigurationType + + noSmithyDocumentSerde +} + +// Use this structure to allow [cryptographic operations] in the grant only when the operation request +// includes the specified [encryption context]. +// +// KMS applies the grant constraints only to cryptographic operations that support +// an encryption context, that is, all cryptographic operations with a [symmetric KMS key]. Grant +// constraints are not applied to operations that do not support an encryption +// context, such as cryptographic operations with asymmetric KMS keys and +// management operations, such as DescribeKeyor RetireGrant. 
+// +// In a cryptographic operation, the encryption context in the decryption +// operation must be an exact, case-sensitive match for the keys and values in the +// encryption context of the encryption operation. Only the order of the pairs can +// vary. +// +// However, in a grant constraint, the key in each key-value pair is not case +// sensitive, but the value is case sensitive. +// +// To avoid confusion, do not use multiple encryption context pairs that differ +// only by case. To require a fully case-sensitive encryption context, use the +// kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or +// key policy. For details, see [kms:EncryptionContext:]in the Key Management Service Developer Guide . +// +// [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations +// [kms:EncryptionContext:]: https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context +// [encryption context]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context +// [symmetric KMS key]: https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks +type GrantConstraints struct { + + // A list of key-value pairs that must match the encryption context in the [cryptographic operation] + // request. The grant allows the operation only when the encryption context in the + // request is the same as the encryption context specified in this constraint. + // + // [cryptographic operation]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + EncryptionContextEquals map[string]string + + // A list of key-value pairs that must be included in the encryption context of + // the [cryptographic operation]request. The grant allows the cryptographic operation only when the + // encryption context in the request includes the key-value pairs specified in this + // constraint, although it can include additional key-value pairs. + // + // [cryptographic operation]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + EncryptionContextSubset map[string]string + + noSmithyDocumentSerde +} + +// Contains information about a grant. +type GrantListEntry struct { + + // A list of key-value pairs that must be present in the encryption context of + // certain subsequent operations that the grant allows. + Constraints *GrantConstraints + + // The date and time when the grant was created. + CreationDate *time.Time + + // The unique identifier for the grant. + GrantId *string + + // The identity that gets the permissions in the grant. + // + // The GranteePrincipal field in the ListGrants response usually contains the user + // or role designated as the grantee principal in the grant. However, when the + // grantee principal in the grant is an Amazon Web Services service, the + // GranteePrincipal field contains the [service principal], which might represent several different + // grantee principals. + // + // [service principal]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services + GranteePrincipal *string + + // The Amazon Web Services account under which the grant was issued. + IssuingAccount *string + + // The unique identifier for the KMS key to which the grant applies. + KeyId *string + + // The friendly name that identifies the grant. 
If a name was provided in the CreateGrant + // request, that name is returned. Otherwise this value is null. + Name *string + + // The list of operations permitted by the grant. + Operations []GrantOperation + + // The principal that can retire the grant. + RetiringPrincipal *string + + noSmithyDocumentSerde +} + +// Contains information about each entry in the key list. +type KeyListEntry struct { + + // ARN of the key. + KeyArn *string + + // Unique identifier of the key. + KeyId *string + + noSmithyDocumentSerde +} + +// Contains metadata about a KMS key. +// +// This data type is used as a response element for the CreateKey, DescribeKey, and ReplicateKey operations. +type KeyMetadata struct { + + // The globally unique identifier for the KMS key. + // + // This member is required. + KeyId *string + + // The twelve-digit account ID of the Amazon Web Services account that owns the + // KMS key. + AWSAccountId *string + + // The Amazon Resource Name (ARN) of the KMS key. For examples, see [Key Management Service (KMS)] in the + // Example ARNs section of the Amazon Web Services General Reference. + // + // [Key Management Service (KMS)]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms + Arn *string + + // The cluster ID of the CloudHSM cluster that contains the key material for the + // KMS key. When you create a KMS key in an CloudHSM [custom key store], KMS creates the key + // material for the KMS key in the associated CloudHSM cluster. This field is + // present only when the KMS key is created in an CloudHSM key store. + // + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + CloudHsmClusterId *string + + // The date and time when the KMS key was created. + CreationDate *time.Time + + // A unique identifier for the [custom key store] that contains the KMS key. This field is present + // only when the KMS key is created in a custom key store. + // + // [custom key store]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + CustomKeyStoreId *string + + // Instead, use the KeySpec field. + // + // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend + // that you use the KeySpec field in your code. However, to avoid breaking + // changes, KMS supports both fields. + // + // Deprecated: This field has been deprecated. Instead, use the KeySpec field. + CustomerMasterKeySpec CustomerMasterKeySpec + + // The date and time after which KMS deletes this KMS key. This value is present + // only when the KMS key is scheduled for deletion, that is, when its KeyState is + // PendingDeletion . + // + // When the primary key in a multi-Region key is scheduled for deletion but still + // has replica keys, its key state is PendingReplicaDeletion and the length of its + // waiting period is displayed in the PendingDeletionWindowInDays field. + DeletionDate *time.Time + + // The description of the KMS key. + Description *string + + // Specifies whether the KMS key is enabled. When KeyState is Enabled this value + // is true, otherwise it is false. + Enabled bool + + // The encryption algorithms that the KMS key supports. You cannot use the KMS key + // with other encryption algorithms within KMS. + // + // This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT . + EncryptionAlgorithms []EncryptionAlgorithmSpec + + // Specifies whether the KMS key's key material expires. 
This value is present + // only when Origin is EXTERNAL , otherwise this value is omitted. + ExpirationModel ExpirationModelType + + // The key agreement algorithm used to derive a shared secret. + KeyAgreementAlgorithms []KeyAgreementAlgorithmSpec + + // The manager of the KMS key. KMS keys in your Amazon Web Services account are + // either customer managed or Amazon Web Services managed. For more information + // about the difference, see [KMS keys]in the Key Management Service Developer Guide. + // + // [KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys + KeyManager KeyManagerType + + // Describes the type of key material in the KMS key. + KeySpec KeySpec + + // The current status of the KMS key. + // + // For more information about how key state affects the use of a KMS key, see [Key states of KMS keys] in + // the Key Management Service Developer Guide. + // + // [Key states of KMS keys]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + KeyState KeyState + + // The [cryptographic operations] for which you can use the KMS key. + // + // [cryptographic operations]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations + KeyUsage KeyUsageType + + // The message authentication code (MAC) algorithm that the HMAC KMS key supports. + // + // This value is present only when the KeyUsage of the KMS key is + // GENERATE_VERIFY_MAC . + MacAlgorithms []MacAlgorithmSpec + + // Indicates whether the KMS key is a multi-Region ( True ) or regional ( False ) + // key. This value is True for multi-Region primary and replica keys and False for + // regional KMS keys. + // + // For more information about multi-Region keys, see [Multi-Region keys in KMS] in the Key Management + // Service Developer Guide. + // + // [Multi-Region keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html + MultiRegion *bool + + // Lists the primary and replica keys in same multi-Region key. This field is + // present only when the value of the MultiRegion field is True . + // + // For more information about any listed KMS key, use the DescribeKey operation. + // + // - MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. + // + // - PrimaryKey displays the key ARN and Region of the primary key. This field + // displays the current KMS key if it is the primary key. + // + // - ReplicaKeys displays the key ARNs and Regions of all replica keys. This + // field includes the current KMS key if it is a replica key. + MultiRegionConfiguration *MultiRegionConfiguration + + // The source of the key material for the KMS key. When this value is AWS_KMS , KMS + // created the key material. When this value is EXTERNAL , the key material was + // imported or the KMS key doesn't have any key material. When this value is + // AWS_CLOUDHSM , the key material was created in the CloudHSM cluster associated + // with a custom key store. + Origin OriginType + + // The waiting period before the primary key in a multi-Region key is deleted. + // This waiting period begins when the last of its replica keys is deleted. This + // value is present only when the KeyState of the KMS key is PendingReplicaDeletion + // . That indicates that the KMS key is the primary key in a multi-Region key, it + // is scheduled for deletion, and it still has existing replica keys. 
+ // + // When a single-Region KMS key or a multi-Region replica key is scheduled for + // deletion, its deletion date is displayed in the DeletionDate field. However, + // when the primary key in a multi-Region key is scheduled for deletion, its + // waiting period doesn't begin until all of its replica keys are deleted. This + // value displays that waiting period. When the last replica key in the + // multi-Region key is deleted, the KeyState of the scheduled primary key changes + // from PendingReplicaDeletion to PendingDeletion and the deletion date appears in + // the DeletionDate field. + PendingDeletionWindowInDays *int32 + + // The signing algorithms that the KMS key supports. You cannot use the KMS key + // with other signing algorithms within KMS. + // + // This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY . + SigningAlgorithms []SigningAlgorithmSpec + + // The time at which the imported key material expires. When the key material + // expires, KMS deletes the key material and the KMS key becomes unusable. This + // value is present only for KMS keys whose Origin is EXTERNAL and whose + // ExpirationModel is KEY_MATERIAL_EXPIRES , otherwise this value is omitted. + ValidTo *time.Time + + // Information about the external key that is associated with a KMS key in an + // external key store. + // + // For more information, see [External key] in the Key Management Service Developer Guide. + // + // [External key]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key + XksKeyConfiguration *XksKeyConfigurationType + + noSmithyDocumentSerde +} + +// Describes the configuration of this multi-Region key. This field appears only +// when the KMS key is a primary or replica of a multi-Region key. +// +// For more information about any listed KMS key, use the DescribeKey operation. +type MultiRegionConfiguration struct { + + // Indicates whether the KMS key is a PRIMARY or REPLICA key. + MultiRegionKeyType MultiRegionKeyType + + // Displays the key ARN and Region of the primary key. This field includes the + // current KMS key if it is the primary key. + PrimaryKey *MultiRegionKey + + // displays the key ARNs and Regions of all replica keys. This field includes the + // current KMS key if it is a replica key. + ReplicaKeys []MultiRegionKey + + noSmithyDocumentSerde +} + +// Describes the primary or replica key in a multi-Region key. +type MultiRegionKey struct { + + // Displays the key ARN of a primary or replica key of a multi-Region key. + Arn *string + + // Displays the Amazon Web Services Region of a primary or replica key in a + // multi-Region key. + Region *string + + noSmithyDocumentSerde +} + +// Contains information about the party that receives the response from the API +// operation. +// +// This data type is designed to support Amazon Web Services Nitro Enclaves, which +// lets you create an isolated compute environment in Amazon EC2. For information +// about the interaction between KMS and Amazon Web Services Nitro Enclaves, see [How Amazon Web Services Nitro Enclaves uses KMS] +// in the Key Management Service Developer Guide. +// +// [How Amazon Web Services Nitro Enclaves uses KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html +type RecipientInfo struct { + + // The attestation document for an Amazon Web Services Nitro Enclave. This + // document includes the enclave's public key. 
+ AttestationDocument []byte + + // The encryption algorithm that KMS should use with the public key for an Amazon + // Web Services Nitro Enclave to encrypt plaintext values for the response. The + // only valid value is RSAES_OAEP_SHA_256 . + KeyEncryptionAlgorithm KeyEncryptionMechanism + + noSmithyDocumentSerde +} + +// Contains information about completed key material rotations. +type RotationsListEntry struct { + + // Unique identifier of the key. + KeyId *string + + // Date and time that the key material rotation completed. Formatted as Unix time. + RotationDate *time.Time + + // Identifies whether the key material rotation was a scheduled [automatic rotation] or an [on-demand rotation]. + // + // [automatic rotation]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable + // [on-demand rotation]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-on-demand + RotationType RotationType + + noSmithyDocumentSerde +} + +// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag +// values are both required, but tag values can be empty (null) strings. +// +// Do not include confidential or sensitive information in this field. This field +// may be displayed in plaintext in CloudTrail logs and other output. +// +// For information about the rules that apply to tag keys and tag values, see [User-Defined Tag Restrictions] in +// the Amazon Web Services Billing and Cost Management User Guide. +// +// [User-Defined Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html +type Tag struct { + + // The key of the tag. + // + // This member is required. + TagKey *string + + // The value of the tag. + // + // This member is required. + TagValue *string + + noSmithyDocumentSerde +} + +// Information about the [external key]that is associated with a KMS key in an external key +// store. +// +// This element appears in a CreateKey or DescribeKey response only for a KMS key in an external key +// store. +// +// The external key is a symmetric encryption key that is hosted by an external +// key manager outside of Amazon Web Services. When you use the KMS key in an +// external key store in a cryptographic operation, the cryptographic operation is +// performed in the external key manager using the specified external key. For more +// information, see [External key]in the Key Management Service Developer Guide. +// +// [External key]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key +// [external key]: https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key +type XksKeyConfigurationType struct { + + // The ID of the external key in its external key manager. This is the ID that the + // external key store proxy uses to identify the external key. + Id *string + + noSmithyDocumentSerde +} + +// KMS uses the authentication credential to sign requests that it sends to the +// external key store proxy (XKS proxy) on your behalf. You establish these +// credentials on your external key store proxy and report them to KMS. +// +// The XksProxyAuthenticationCredential includes two required elements. +type XksProxyAuthenticationCredentialType struct { + + // A unique identifier for the raw secret access key. + // + // This member is required. + AccessKeyId *string + + // A secret string of 43-64 characters. Valid characters are a-z, A-Z, 0-9, /, +, + // and =. 
+ // + // This member is required. + RawSecretAccessKey *string + + noSmithyDocumentSerde +} + +// Detailed information about the external key store proxy (XKS proxy). Your +// external key store proxy translates KMS requests into a format that your +// external key manager can understand. These fields appear in a DescribeCustomKeyStoresresponse only +// when the CustomKeyStoreType is EXTERNAL_KEY_STORE . +type XksProxyConfigurationType struct { + + // The part of the external key store [proxy authentication credential] that uniquely identifies the secret access + // key. + // + // [proxy authentication credential]: https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential + AccessKeyId *string + + // Indicates whether the external key store proxy uses a public endpoint or an + // Amazon VPC endpoint service to communicate with KMS. + Connectivity XksProxyConnectivityType + + // The URI endpoint for the external key store proxy. + // + // If the external key store proxy has a public endpoint, it is displayed here. + // + // If the external key store proxy uses an Amazon VPC endpoint service name, this + // field displays the private DNS name associated with the VPC endpoint service. + UriEndpoint *string + + // The path to the external key store proxy APIs. + UriPath *string + + // The Amazon VPC endpoint service used to communicate with the external key store + // proxy. This field appears only when the external key store proxy uses an Amazon + // VPC endpoint service to communicate with KMS. + VpcEndpointServiceName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go new file mode 100644 index 000000000..58254d141 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go @@ -0,0 +1,2050 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kms + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCancelKeyDeletion struct { +} + +func (*validateOpCancelKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelKeyDeletionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpConnectCustomKeyStore struct { +} + +func (*validateOpConnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpConnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpConnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateAlias struct { +} + +func (*validateOpCreateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCustomKeyStore struct { +} + +func (*validateOpCreateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateGrant struct { +} + +func (*validateOpCreateGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateKey struct { +} + +func (*validateOpCreateKey) ID() 
string { + return "OperationInputValidation" +} + +func (m *validateOpCreateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecrypt struct { +} + +func (*validateOpDecrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteAlias struct { +} + +func (*validateOpDeleteAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCustomKeyStore struct { +} + +func (*validateOpDeleteCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteImportedKeyMaterial struct { +} + +func (*validateOpDeleteImportedKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteImportedKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteImportedKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeriveSharedSecret struct { +} + +func (*validateOpDeriveSharedSecret) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeriveSharedSecret) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { 
+ input, ok := in.Parameters.(*DeriveSharedSecretInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeriveSharedSecretInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeKey struct { +} + +func (*validateOpDescribeKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKey struct { +} + +func (*validateOpDisableKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKeyRotation struct { +} + +func (*validateOpDisableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisconnectCustomKeyStore struct { +} + +func (*validateOpDisconnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisconnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisconnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKey struct { +} + +func (*validateOpEnableKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, 
in) +} + +type validateOpEnableKeyRotation struct { +} + +func (*validateOpEnableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEncrypt struct { +} + +func (*validateOpEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKey struct { +} + +func (*validateOpGenerateDataKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPair struct { +} + +func (*validateOpGenerateDataKeyPair) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPair) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPairWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpGenerateDataKeyWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateMac struct { +} + +func (*validateOpGenerateMac) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyPolicy struct { +} + +func (*validateOpGetKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyRotationStatus struct { +} + +func (*validateOpGetKeyRotationStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyRotationStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyRotationStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyRotationStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetParametersForImport struct { +} + +func (*validateOpGetParametersForImport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetParametersForImport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetParametersForImportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetParametersForImportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicKey struct { +} + +func (*validateOpGetPublicKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { 
+ input, ok := in.Parameters.(*GetPublicKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpImportKeyMaterial struct { +} + +func (*validateOpImportKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpImportKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ImportKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpImportKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListGrants struct { +} + +func (*validateOpListGrants) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListKeyPolicies struct { +} + +func (*validateOpListKeyPolicies) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListKeyPolicies) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListKeyPoliciesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListKeyPoliciesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListKeyRotations struct { +} + +func (*validateOpListKeyRotations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListKeyRotations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListKeyRotationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListKeyRotationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListResourceTags struct { +} + +func (*validateOpListResourceTags) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListResourceTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListResourceTagsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListResourceTagsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) 
+} + +type validateOpListRetirableGrants struct { +} + +func (*validateOpListRetirableGrants) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListRetirableGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListRetirableGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListRetirableGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutKeyPolicy struct { +} + +func (*validateOpPutKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpReEncrypt struct { +} + +func (*validateOpReEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReEncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpReplicateKey struct { +} + +func (*validateOpReplicateKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReplicateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReplicateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReplicateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRevokeGrant struct { +} + +func (*validateOpRevokeGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRevokeGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RevokeGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRevokeGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRotateKeyOnDemand struct { +} + +func (*validateOpRotateKeyOnDemand) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRotateKeyOnDemand) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata 
middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RotateKeyOnDemandInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRotateKeyOnDemandInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpScheduleKeyDeletion struct { +} + +func (*validateOpScheduleKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpScheduleKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ScheduleKeyDeletionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpScheduleKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSign struct { +} + +func (*validateOpSign) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSign) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SignInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSignInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateAlias struct { +} + +func (*validateOpUpdateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpUpdateCustomKeyStore struct { +} + +func (*validateOpUpdateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateKeyDescription struct { +} + +func (*validateOpUpdateKeyDescription) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateKeyDescription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateKeyDescriptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdatePrimaryRegion struct { +} + +func (*validateOpUpdatePrimaryRegion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdatePrimaryRegion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdatePrimaryRegionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpVerify struct { +} + +func (*validateOpVerify) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerify) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpVerifyMac struct { +} + +func (*validateOpVerifyMac) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerifyMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCancelKeyDeletionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCancelKeyDeletion{}, middleware.After) +} + +func addOpConnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpConnectCustomKeyStore{}, middleware.After) +} + +func addOpCreateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateAlias{}, middleware.After) +} + +func addOpCreateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCustomKeyStore{}, middleware.After) +} + +func addOpCreateGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateGrant{}, middleware.After) +} + +func addOpCreateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateKey{}, middleware.After) +} + +func addOpDecryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecrypt{}, middleware.After) +} + +func addOpDeleteAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteAlias{}, middleware.After) +} + +func addOpDeleteCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCustomKeyStore{}, middleware.After) +} + +func addOpDeleteImportedKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteImportedKeyMaterial{}, middleware.After) +} + +func addOpDeriveSharedSecretValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeriveSharedSecret{}, middleware.After) +} + +func addOpDescribeKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeKey{}, middleware.After) +} + +func addOpDisableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKey{}, middleware.After) +} + +func addOpDisableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKeyRotation{}, middleware.After) +} + +func addOpDisconnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisconnectCustomKeyStore{}, middleware.After) +} + +func addOpEnableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKey{}, middleware.After) +} + +func addOpEnableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKeyRotation{}, middleware.After) +} + +func addOpEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEncrypt{}, middleware.After) +} + +func addOpGenerateDataKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKey{}, middleware.After) +} + +func addOpGenerateDataKeyPairValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPair{}, middleware.After) +} + +func addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateMac{}, middleware.After) +} + 
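The validateOp* middlewares above are registered into the Initialize phase of the smithy middleware stack by these addOp*ValidationMiddleware helpers, so a missing required parameter is rejected locally before any request is signed or sent. A minimal caller-side sketch of that behaviour, assuming a stock aws-sdk-go-v2 setup; the config loading and the errors.As matching are illustrative and not taken from this patch or from sops:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// KeyId, Message and SigningAlgorithm are all required by
	// validateOpSignInput, so this call is rejected in the Initialize
	// step of the middleware stack and never reaches the network.
	_, err = client.Sign(ctx, &kms.SignInput{})

	// The validators return a smithy.InvalidParamsError by value, which
	// the SDK wraps in an operation error; errors.As unwraps it here.
	var ipe smithy.InvalidParamsError
	if errors.As(err, &ipe) {
		log.Printf("rejected client-side: %v", ipe)
	}
}
```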
+func addOpGetKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyPolicy{}, middleware.After) +} + +func addOpGetKeyRotationStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyRotationStatus{}, middleware.After) +} + +func addOpGetParametersForImportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetParametersForImport{}, middleware.After) +} + +func addOpGetPublicKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetPublicKey{}, middleware.After) +} + +func addOpImportKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpImportKeyMaterial{}, middleware.After) +} + +func addOpListGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListGrants{}, middleware.After) +} + +func addOpListKeyPoliciesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListKeyPolicies{}, middleware.After) +} + +func addOpListKeyRotationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListKeyRotations{}, middleware.After) +} + +func addOpListResourceTagsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListResourceTags{}, middleware.After) +} + +func addOpListRetirableGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListRetirableGrants{}, middleware.After) +} + +func addOpPutKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutKeyPolicy{}, middleware.After) +} + +func addOpReEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReEncrypt{}, middleware.After) +} + +func addOpReplicateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReplicateKey{}, middleware.After) +} + +func addOpRevokeGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRevokeGrant{}, middleware.After) +} + +func addOpRotateKeyOnDemandValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRotateKeyOnDemand{}, middleware.After) +} + +func addOpScheduleKeyDeletionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpScheduleKeyDeletion{}, middleware.After) +} + +func addOpSignValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSign{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateAlias{}, middleware.After) +} + +func addOpUpdateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCustomKeyStore{}, middleware.After) +} + +func addOpUpdateKeyDescriptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateKeyDescription{}, 
middleware.After) +} + +func addOpUpdatePrimaryRegionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdatePrimaryRegion{}, middleware.After) +} + +func addOpVerifyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerify{}, middleware.After) +} + +func addOpVerifyMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerifyMac{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.TagKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKey")) + } + if v.TagValue == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagValue")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagList(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagList"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateXksProxyAuthenticationCredentialType(v *types.XksProxyAuthenticationCredentialType) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "XksProxyAuthenticationCredentialType"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if v.RawSecretAccessKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("RawSecretAccessKey")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCancelKeyDeletionInput(v *CancelKeyDeletionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ConnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateAliasInput(v *CreateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCustomKeyStoreInput(v *CreateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCustomKeyStoreInput"} + if v.CustomKeyStoreName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreName")) + } + if v.XksProxyAuthenticationCredential != nil { + if err := validateXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential); err != nil { + invalidParams.AddNested("XksProxyAuthenticationCredential", err.(smithy.InvalidParamsError)) + } + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateGrantInput(v *CreateGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GranteePrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("GranteePrincipal")) + } + if v.Operations == nil { + invalidParams.Add(smithy.NewErrParamRequired("Operations")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateKeyInput(v *CreateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateKeyInput"} + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecryptInput(v *DecryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteAliasInput(v *DeleteAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteImportedKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeriveSharedSecretInput(v *DeriveSharedSecretInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeriveSharedSecretInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyAgreementAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyAgreementAlgorithm")) + } + if v.PublicKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("PublicKey")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeKeyInput(v *DescribeKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyInput(v *DisableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKeyInput"} + if 
v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyRotationInput(v *DisableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisconnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyInput(v *EnableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyRotationInput(v *EnableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEncryptInput(v *EncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EncryptInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Plaintext == nil { + invalidParams.Add(smithy.NewErrParamRequired("Plaintext")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyInput(v *GenerateDataKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairInput(v *GenerateDataKeyPairInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"GenerateDataKeyWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateMacInput(v *GenerateMacInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyPolicyInput(v *GetKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyRotationStatusInput(v *GetKeyRotationStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyRotationStatusInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetParametersForImportInput(v *GetParametersForImportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetParametersForImportInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.WrappingAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingAlgorithm")) + } + if len(v.WrappingKeySpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingKeySpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicKeyInput(v *GetPublicKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpImportKeyMaterialInput(v *ImportKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImportKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ImportToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImportToken")) + } + if v.EncryptedKeyMaterial == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncryptedKeyMaterial")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListGrantsInput(v *ListGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGrantsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListKeyPoliciesInput(v *ListKeyPoliciesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListKeyPoliciesInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListKeyRotationsInput(v *ListKeyRotationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListKeyRotationsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListResourceTagsInput(v *ListResourceTagsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListResourceTagsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListRetirableGrantsInput(v *ListRetirableGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListRetirableGrantsInput"} + if v.RetiringPrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("RetiringPrincipal")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutKeyPolicyInput(v *PutKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReEncryptInput(v *ReEncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReEncryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if v.DestinationKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DestinationKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReplicateKeyInput(v *ReplicateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicateKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ReplicaRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicaRegion")) + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRevokeGrantInput(v *RevokeGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RevokeGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GrantId == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRotateKeyOnDemandInput(v *RotateKeyOnDemandInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RotateKeyOnDemandInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput) error { + if v == nil { + return nil + } + 
invalidParams := smithy.InvalidParamsError{Context: "ScheduleKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSignInput(v *SignInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SignInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } else if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateAliasInput(v *UpdateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateCustomKeyStoreInput(v *UpdateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if v.XksProxyAuthenticationCredential != nil { + if err := validateXksProxyAuthenticationCredentialType(v.XksProxyAuthenticationCredential); err != nil { + invalidParams.AddNested("XksProxyAuthenticationCredential", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateKeyDescriptionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Description == nil { + invalidParams.Add(smithy.NewErrParamRequired("Description")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdatePrimaryRegionInput"} + if v.KeyId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PrimaryRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrimaryRegion")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyInput(v *VerifyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.Signature == nil { + invalidParams.Add(smithy.NewErrParamRequired("Signature")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyMacInput(v *VerifyMacInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if v.Mac == nil { + invalidParams.Add(smithy.NewErrParamRequired("Mac")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ecc..000000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0e1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. 
Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. 
-type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. -func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf40..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. 
-func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. 
We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106d5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. -func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. 
- if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 142a7a01c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type they are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index a4eb6a7f4..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,221 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - rvals := rValuesAtPath(i, path, true, false, v == nil) - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 11d4240d6..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,123 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("<buffer>") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - ft, ok := v.Type().FieldByName(n) - if !ok { - panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type())) - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("<sensitive>") - } else { - prettify(val, indent+2, buf) - } - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, "<binary> len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "<invalid value>") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index 3f7cffd95..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,90 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. -// -// Deprecated: Use Prettify instead. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index b147f103c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,94 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string - ResolvedRegion string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overridden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. 
-type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. -func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = DefaultRetryerMaxNumRetries - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 9f6af19dd..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,177 +0,0 @@ -package client - -import ( - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, you can implement the -// request.Retryer interface. -// -type DefaultRetryer struct { - // Num max Retries is the number of max retries that will be performed. - // By default, this is zero. - NumMaxRetries int - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - // If not set, the value is 0ns. - MinRetryDelay time.Duration - - // MinThrottleRetryDelay is the minimum retry delay when throttled. - // If not set, the value is 0ns. - MinThrottleDelay time.Duration - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - // If not set, the value is 0ns. - MaxRetryDelay time.Duration - - // MaxThrottleDelay is the maximum retry delay when throttled. - // If not set, the value is 0ns. 
- MaxThrottleDelay time.Duration -} - -const ( - // DefaultRetryerMaxNumRetries sets maximum number of retries - DefaultRetryerMaxNumRetries = 3 - - // DefaultRetryerMinRetryDelay sets minimum retry delay - DefaultRetryerMinRetryDelay = 30 * time.Millisecond - - // DefaultRetryerMinThrottleDelay sets minimum delay when throttled - DefaultRetryerMinThrottleDelay = 500 * time.Millisecond - - // DefaultRetryerMaxRetryDelay sets maximum retry delay - DefaultRetryerMaxRetryDelay = 300 * time.Second - - // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled - DefaultRetryerMaxThrottleDelay = 300 * time.Second -) - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// setRetryerDefaults sets the default values of the retryer if not set -func (d *DefaultRetryer) setRetryerDefaults() { - if d.MinRetryDelay == 0 { - d.MinRetryDelay = DefaultRetryerMinRetryDelay - } - if d.MaxRetryDelay == 0 { - d.MaxRetryDelay = DefaultRetryerMaxRetryDelay - } - if d.MinThrottleDelay == 0 { - d.MinThrottleDelay = DefaultRetryerMinThrottleDelay - } - if d.MaxThrottleDelay == 0 { - d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay - } -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - - // if number of max retries is zero, no retries will be performed. - if d.NumMaxRetries == 0 { - return 0 - } - - // Sets default value for retryer members - d.setRetryerDefaults() - - // minDelay is the minimum retryer delay - minDelay := d.MinRetryDelay - - var initialDelay time.Duration - - isThrottle := r.IsErrorThrottle() - if isThrottle { - if delay, ok := getRetryAfterDelay(r); ok { - initialDelay = delay - } - minDelay = d.MinThrottleDelay - } - - retryCount := r.RetryCount - - // maxDelay the maximum retryer delay - maxDelay := d.MaxRetryDelay - - if isThrottle { - maxDelay = d.MaxThrottleDelay - } - - var delay time.Duration - - // Logic to cap the retry count based on the minDelay provided - actualRetryCount := int(math.Log2(float64(minDelay))) + 1 - if actualRetryCount < 63-retryCount { - delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay) - if delay > maxDelay { - delay = getJitterDelay(maxDelay / 2) - } - } else { - delay = getJitterDelay(maxDelay / 2) - } - return delay + initialDelay -} - -// getJitterDelay returns a jittered delay for retry -func getJitterDelay(duration time.Duration) time.Duration { - return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - - // ShouldRetry returns false if number of max retries is 0. 
- if d.NumMaxRetries == 0 { - return false - } - - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - return r.IsErrorRetryable() || r.IsErrorThrottle() -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. -func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index 5ac5c24a1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. -var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the - // r.HTTPRequest's Body as a NoOpCloser and will not be reset after - // read by the HTTP client reader. 
- if err := r.Error; err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. -var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - if r.HTTPResponse == nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) - return - } - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. 
-var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index a7530ebb3..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,15 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string - ResolvedRegion string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go deleted file mode 100644 index 881d575f0..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// NoOpRetryer provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type NoOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d NoOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 79f18fb2f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,628 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig structure. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. 
-// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `nil` or the value to `""` to use the default generated endpoint. - // - // Note: You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS - // Regions and Endpoints. - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. 
- DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // Note: This configuration option is specific to the Amazon S3 service. - // - // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // for Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disable 100-Continue if you experience issues - // with proxies or third party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled, - // For S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to have the S3 service client to use the region specified - // in the ARN, when an ARN is provided as an argument to a bucket parameter. - S3UseARNRegion *bool - - // Set this to `true` to enable the SDK to unmarshal API response header maps to - // normalized lower case map keys. - // - // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case - // Metadata member's map keys. The value of the header in the map is unaffected. - LowerCaseHeaderMaps *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This options is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain. 
- // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDisableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requests. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session. Even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with. - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - // - // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. - // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients - // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher - // precedence then this option. - UseDualStack *bool - - // Sets the resolver to resolve a dual-stack endpoint for the service. - UseDualStackEndpoint endpoints.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint endpoints.FIPSEndpointState - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool - - // EnableEndpointDiscovery will allow for endpoint discovery on operations that - // have the definition in its model. By default, endpoint discovery is off. - // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. - // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // EnableEndpointDiscovery: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("/foo/bar/moo"), - // }) - EnableEndpointDiscovery *bool - - // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing - // request endpoint hosts with modeled information. - // - // Disabling this feature is useful when you want to use local endpoints - // for testing that do not support the modeled host prefix pattern. 
- DisableEndpointHostPrefix *bool - - // STSRegionalEndpoint will enable regional or legacy endpoint resolving - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. 
-func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. -func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithS3UseARNRegion sets a config S3UseARNRegion value and -// returning a Config pointer for chaining -func (c *Config) WithS3UseARNRegion(enable bool) *Config { - c.S3UseARNRegion = &enable - return c -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// WithEndpointDiscovery will set whether or not to use endpoint discovery. -func (c *Config) WithEndpointDiscovery(t bool) *Config { - c.EnableEndpointDiscovery = &t - return c -} - -// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix -// when making requests. -func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { - c.DisableEndpointHostPrefix = &t - return c -} - -// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { - c.STSRegionalEndpoint = sre - return c -} - -// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag -// when resolving the endpoint for a service -func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { - c.S3UsEast1RegionalEndpoint = sre - return c -} - -// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value -// returning a Config pointer for chaining. -func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { - c.LowerCaseHeaderMaps = &t - return c -} - -// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value -// returning a Config pointer for chaining. -func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { - c.DisableRestProtocolURICleaning = &t - return c -} - -// MergeIn merges the passed in configs into the existing config object. 
-func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.S3UseARNRegion != nil { - dst.S3UseARNRegion = other.S3UseARNRegion - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { - dst.UseDualStackEndpoint = other.UseDualStackEndpoint - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } - - if other.EnableEndpointDiscovery != nil { - dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery - } - - if other.DisableEndpointHostPrefix != nil { - dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix - } - - if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { - dst.STSRegionalEndpoint = other.STSRegionalEndpoint - } - - if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { - dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint - } - - if other.LowerCaseHeaderMaps != nil { - dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps - } - - if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { - dst.UseDualStackEndpoint = other.UseDualStackEndpoint - } - - if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset { - dst.UseFIPSEndpoint = other.UseFIPSEndpoint - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. 
-func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go deleted file mode 100644 index 89aad2c67..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !go1.9 -// +build !go1.9 - -package aws - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go deleted file mode 100644 index 6ee9ddd18..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.9 -// +build go1.9 - -package aws - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go deleted file mode 100644 index 313218190..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package aws - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. 
-func BackgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go deleted file mode 100644 index 9975d561b..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package aws - -import "context" - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go deleted file mode 100644 index 304fd1561..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go +++ /dev/null @@ -1,24 +0,0 @@ -package aws - -import ( - "time" -) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. -func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 4e076c183..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,918 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. 
-func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pointer to the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pointer passed in or -// 0 if the pointer is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pointers -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pointers uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pointers -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pointers uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int8 returns a pointer to the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Value returns the value of the int8 pointer passed in or -// 0 if the pointer is nil. -func Int8Value(v *int8) int8 { - if v != nil { - return *v - } - return 0 -} - -// Int8Slice converts a slice of int8 values into a slice of -// int8 pointers -func Int8Slice(src []int8) []*int8 { - dst := make([]*int8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int8ValueSlice converts a slice of int8 pointers into a slice of -// int8 values -func Int8ValueSlice(src []*int8) []int8 { - dst := make([]int8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int8Map converts a string map of int8 values into a string -// map of int8 pointers -func Int8Map(src map[string]int8) map[string]*int8 { - dst := make(map[string]*int8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int8ValueMap converts a string map of int8 pointers into a string -// map of int8 values -func Int8ValueMap(src map[string]*int8) map[string]int8 { - dst := make(map[string]int8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int16 returns a pointer to the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Value returns the value of the int16 pointer passed in or -// 0 if the pointer is nil. 
-func Int16Value(v *int16) int16 { - if v != nil { - return *v - } - return 0 -} - -// Int16Slice converts a slice of int16 values into a slice of -// int16 pointers -func Int16Slice(src []int16) []*int16 { - dst := make([]*int16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int16ValueSlice converts a slice of int16 pointers into a slice of -// int16 values -func Int16ValueSlice(src []*int16) []int16 { - dst := make([]int16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int16Map converts a string map of int16 values into a string -// map of int16 pointers -func Int16Map(src map[string]int16) map[string]*int16 { - dst := make(map[string]*int16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int16ValueMap converts a string map of int16 pointers into a string -// map of int16 values -func Int16ValueMap(src map[string]*int16) map[string]int16 { - dst := make(map[string]int16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int32 pointer passed in or -// 0 if the pointer is nil. -func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int32 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint8 returns a pointer to the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Value returns the value of the uint8 pointer passed in or -// 0 if the pointer is nil. -func Uint8Value(v *uint8) uint8 { - if v != nil { - return *v - } - return 0 -} - -// Uint8Slice converts a slice of uint8 values into a slice of -// uint8 pointers -func Uint8Slice(src []uint8) []*uint8 { - dst := make([]*uint8, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint8ValueSlice converts a slice of uint8 pointers into a slice of -// uint8 values -func Uint8ValueSlice(src []*uint8) []uint8 { - dst := make([]uint8, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint8Map converts a string map of uint8 values into a string -// map of uint8 pointers -func Uint8Map(src map[string]uint8) map[string]*uint8 { - dst := make(map[string]*uint8) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint8ValueMap converts a string map of uint8 pointers into a string -// map of uint8 values -func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { - dst := make(map[string]uint8) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint16 returns a pointer to the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Value returns the value of the uint16 pointer passed in or -// 0 if the pointer is nil. 
-func Uint16Value(v *uint16) uint16 { - if v != nil { - return *v - } - return 0 -} - -// Uint16Slice converts a slice of uint16 values into a slice of -// uint16 pointers -func Uint16Slice(src []uint16) []*uint16 { - dst := make([]*uint16, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint16ValueSlice converts a slice of uint16 pointers into a slice of -// uint16 values -func Uint16ValueSlice(src []*uint16) []uint16 { - dst := make([]uint16, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint16Map converts a string map of uint16 values into a string -// map of uint16 pointers -func Uint16Map(src map[string]uint16) map[string]*uint16 { - dst := make(map[string]*uint16) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint16ValueMap converts a string map of uint16 pointers into a string -// map of uint16 values -func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { - dst := make(map[string]uint16) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pointer to the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint32 pointer passed in or -// 0 if the pointer is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint32 values into a slice of -// uint32 pointers -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pointers into a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values into a string -// map of uint32 pointers -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pointers into a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pointer to the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pointer passed in or -// 0 if the pointer is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values into a slice of -// uint64 pointers -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pointers into a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values into a string -// map of uint64 pointers -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pointers into a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float32 returns a pointer to the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Value returns the value of the float32 pointer passed in or -// 0 if the pointer is nil. -func Float32Value(v *float32) float32 { - if v != nil { - return *v - } - return 0 -} - -// Float32Slice converts a slice of float32 values into a slice of -// float32 pointers -func Float32Slice(src []float32) []*float32 { - dst := make([]*float32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float32ValueSlice converts a slice of float32 pointers into a slice of -// float32 values -func Float32ValueSlice(src []*float32) []float32 { - dst := make([]float32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float32Map converts a string map of float32 values into a string -// map of float32 pointers -func Float32Map(src map[string]float32) map[string]*float32 { - dst := make(map[string]*float32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float32ValueMap converts a string map of float32 pointers into a string -// map of float32 values -func Float32ValueMap(src map[string]*float32) map[string]float32 { - dst := make(map[string]float32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 36a915efe..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,232 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. 
-var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 5 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(5 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all request errors, and let the default retrier determine - // if the error is retryable. - r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. 
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", r.Error) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{ - Name: "core.AfterRetryHandler", - Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } - }} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - // Was any endpoint provided by the user, or one was derived by the - // SDK's endpoint resolver? - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b1557..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index ab69c7a6f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. 
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec-env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 3ad1e798d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true. - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. 
-// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go deleted file mode 100644 index 6e3406b1f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package credentials - -import ( - "github.com/aws/aws-sdk-go/internal/context" -) - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.BackgroundCtx -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go deleted file mode 100644 index a68df0ee7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package credentials - -import "context" - -// backgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func backgroundContext() Context { - return context.Background() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go deleted file mode 100644 index 0345fab2d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !go1.9 -// +build !go1.9 - -package credentials - -import "time" - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. 
-// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go deleted file mode 100644 index 79018aba7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build go1.9 -// +build go1.9 - -package credentials - -import "context" - -// Context is an alias of the Go stdlib's context.Context interface. -// It can be used within the SDK's API operation "WithContext" methods. -// -// This type, aws.Context, and context.Context are equivalent. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index a880a3de8..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,383 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. 
-// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "fmt" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/sync/singleflight" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// HasKeys returns if the credentials Value has both AccessKeyID and -// SecretAccessKey value set. -func (v Value) HasKeys() bool { - return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// ProviderWithContext is a Provider that can retrieve credentials with a Context -type ProviderWithContext interface { - Provider - - RetrieveWithContext(Context) (Value, error) -} - -// An Expirer is an interface that Providers can implement to expose the expiration -// time, if known. If the Provider cannot accurately provide this info, -// it should not implement this interface. -type Expirer interface { - // The time at which the credentials are no longer valid - ExpiresAt() time.Time -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. 
-func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. -func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - // Passed in expirations should have the monotonic clock values stripped. - // This ensures time comparisons will be based on wall-time. - e.expiration = expiration.Round(0) - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// ExpiresAt returns the expiration time of the credential -func (e *Expiry) ExpiresAt() time.Time { - return e.expiration -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - sf singleflight.Group - - m sync.RWMutex - creds Value - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - c := &Credentials{ - provider: provider, - } - return c -} - -// GetWithContext returns the credentials value, or error if the credentials -// Value failed to be retrieved. Will return early if the passed in context is -// canceled. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -// -// Passed in Context is equivalent to aws.Context, and context.Context. -func (c *Credentials) GetWithContext(ctx Context) (Value, error) { - // Check if credentials are cached, and not expired. 
- select { - case curCreds, ok := <-c.asyncIsExpired(): - // ok will only be true, of the credentials were not expired. ok will - // be false and have no value if the credentials are expired. - if ok { - return curCreds, nil - } - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } - - // Cannot pass context down to the actual retrieve, because the first - // context would cancel the whole group when there is not direct - // association of items in the group. - resCh := c.sf.DoChan("", func() (interface{}, error) { - return c.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Value), res.Err - case <-ctx.Done(): - return Value{}, awserr.New("RequestCanceled", - "request context canceled", ctx.Err()) - } -} - -func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { - c.m.Lock() - defer c.m.Unlock() - - if curCreds := c.creds; !c.isExpiredLocked(curCreds) { - return curCreds, nil - } - - var creds Value - var err error - if p, ok := c.provider.(ProviderWithContext); ok { - creds, err = p.RetrieveWithContext(ctx) - } else { - creds, err = c.provider.Retrieve() - } - if err == nil { - c.creds = creds - } - - return creds, err -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - return c.GetWithContext(backgroundContext()) -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.creds = Value{} -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpiredLocked(c.creds) -} - -// asyncIsExpired returns a channel of credentials Value. If the channel is -// closed the credentials are expired and credentials value are not empty. -func (c *Credentials) asyncIsExpired() <-chan Value { - ch := make(chan Value, 1) - go func() { - c.m.RLock() - defer c.m.RUnlock() - - if curCreds := c.creds; !c.isExpiredLocked(curCreds) { - ch <- curCreds - } - - close(ch) - }() - - return ch -} - -// isExpiredLocked helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpiredLocked(creds interface{}) bool { - return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() -} - -// ExpiresAt provides access to the functionality of the Expirer interface of -// the underlying Provider, if it supports that interface. Otherwise, it returns -// an error. 
-func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - - expirer, ok := c.provider.(Expirer) - if !ok { - return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", - c.creds.ProviderName), - nil) - } - if c.creds == (Value{}) { - // set expiration time to the distant past - return time.Time{}, nil - } - return expirer.ExpiresAt(), nil -} - -type suppressedContext struct { - Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 92af5b725..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,188 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. 
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - return m.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - credsList, err := requestCredList(ctx, m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New(request.ErrCodeSerialization, - "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
-func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New(request.ErrCodeSerialization, - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go deleted file mode 100644 index 785f30d8e..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "encoding/json" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -// Provider satisfies the credentials.Provider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - staticCreds bool - credentials.Expiry - - // Requires a AWS Client to make HTTP requests to the endpoint with. - // the Endpoint the request will be made to is provided by the aws.Config's - // Endpoint value. - Client *client.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. 
- // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - AuthorizationToken string -} - -// NewProviderClient returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { - p := &Provider{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: "CredentialsEndpoint", - Endpoint: endpoint, - }, - handlers, - ), - } - - p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) - p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) - p.Client.Handlers.Validate.Clear() - p.Client.Handlers.Validate.PushBack(validateEndpointHandler) - - for _, option := range options { - option(p) - } - - return p -} - -// NewCredentialsClient returns a pointer to a new Credentials object -// wrapping the endpoint credentials Provider. -func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { - return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *Provider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. 
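A brief sketch (illustrative only, not part of the patch) of the endpointcreds provider being removed here, assuming a hypothetical local credentials endpoint and authorization token:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	def := defaults.Get()

	// The endpoint URL and authorization token below are placeholders.
	creds := endpointcreds.NewCredentialsClient(*def.Config, def.Handlers,
		"http://127.0.0.1:8080/latest/credentials",
		func(p *endpointcreds.Provider) {
			p.AuthorizationToken = "example-token"
		},
	)

	if _, err := creds.Get(); err != nil {
		fmt.Println("endpoint credential fetch failed:", err)
	}
}
```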
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("CredentialsEndpointError", "failed to load credentials", err) - } - - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } else { - p.staticCreds = true - } - - return credentials.Value{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - ProviderName: ProviderName, - }, nil -} - -type getCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -type errorOutput struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { - op := &request.Operation{ - Name: "GetCredentials", - HTTPMethod: "GET", - } - - out := &getCredentialsOutput{} - req := p.Client.NewRequest(op, nil, out) - req.SetContext(ctx) - req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { - req.HTTPRequest.Header.Set("Authorization", authToken) - } - - return out, req.Send() -} - -func validateEndpointHandler(r *request.Request) { - if len(r.ClientInfo.Endpoint) == 0 { - r.Error = aws.ErrMissingEndpoint - } -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - out := r.Data.(*getCredentialsOutput) - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to decode endpoint credentials", - err, - ) - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var errOut errorOutput - err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to decode error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New(errOut.Code, errOut.Message, nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 54c5cf733..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,74 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. 
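A self-contained sketch of the environment-variable provider described above (illustrative only; the key material is a placeholder set just to make the example runnable):

```
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder key material, set here only so the example is self-contained.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secretEXAMPLE")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		fmt.Println("environment credentials missing:", err)
		return
	}
	fmt.Println("credentials from", v.ProviderName) // EnvProvider
}
```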
-// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d2..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go deleted file mode 100644 index e62483600..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Package processcreds is a credential Provider to retrieve `credential_process` -credentials. - -WARNING: The following describes a method of sourcing credentials from an external -process. This can potentially be dangerous, so proceed with caution. Other -credential providers should be preferred if at all possible. If using this -option, you should make sure that the config file is as locked down as possible -using security best practices for your operating system. - -You can use credentials from a `credential_process` in a variety of ways. - -One way is to setup your shared config file, located in the default -location, with the `credential_process` key and the command you want to be -called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. - - [default] - credential_process = /command/to/call - -Creating a new session will use the credential process to retrieve credentials. -NOTE: If there are credentials in the profile you are using, the credential -process will not be used. - - // Initialize a session to load credentials. - sess, _ := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1")}, - ) - - // Create S3 service client to use the credentials. 
- svc := s3.New(sess) - -Another way to use the `credential_process` method is by using -`credentials.NewCredentials()` and providing a command to be executed to -retrieve credentials: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentials("/path/to/command") - - // Create service client value configured for credentials. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -You can set a non-default timeout for the `credential_process` with another -constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To -set a one minute timeout: - - // Create credentials using the ProcessProvider. - creds := processcreds.NewCredentialsTimeout( - "/path/to/command", - time.Duration(500) * time.Millisecond) - -If you need more control, you can set any configurable options in the -credentials using one or more option functions. For example, you can set a two -minute timeout, a credential duration of 60 minutes, and a maximum stdout -buffer size of 2k. - - creds := processcreds.NewCredentials( - "/path/to/command", - func(opt *ProcessProvider) { - opt.Timeout = time.Duration(2) * time.Minute - opt.Duration = time.Duration(60) * time.Minute - opt.MaxBufSize = 2048 - }) - -You can also use your own `exec.Cmd`: - - // Create an exec.Cmd - myCommand := exec.Command("/path/to/command") - - // Create credentials using your exec.Cmd and custom timeout - creds := processcreds.NewCredentialsCommand( - myCommand, - func(opt *processcreds.ProcessProvider) { - opt.Timeout = time.Duration(1) * time.Second - }) -*/ -package processcreds - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. 
- ProviderName = `ProcessProvider` - - // ErrCodeProcessProviderParse error parsing process output - ErrCodeProcessProviderParse = "ProcessProviderParseError" - - // ErrCodeProcessProviderVersion version error in output - ErrCodeProcessProviderVersion = "ProcessProviderVersionError" - - // ErrCodeProcessProviderRequired required attribute missing in output - ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" - - // ErrCodeProcessProviderExecution execution of command failed - ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" - - // errMsgProcessProviderTimeout process took longer than allowed - errMsgProcessProviderTimeout = "credential process timed out" - - // errMsgProcessProviderProcess process error - errMsgProcessProviderProcess = "error in credential_process" - - // errMsgProcessProviderParse problem parsing output - errMsgProcessProviderParse = "parse failed of credential_process output" - - // errMsgProcessProviderVersion version error in output - errMsgProcessProviderVersion = "wrong version in process output (not 1)" - - // errMsgProcessProviderMissKey missing access key id in output - errMsgProcessProviderMissKey = "missing AccessKeyId in process output" - - // errMsgProcessProviderMissSecret missing secret acess key in output - errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" - - // errMsgProcessProviderPrepareCmd prepare of command failed - errMsgProcessProviderPrepareCmd = "failed to prepare command" - - // errMsgProcessProviderEmptyCmd command must not be empty - errMsgProcessProviderEmptyCmd = "command must not be empty" - - // errMsgProcessProviderPipe failed to initialize pipe - errMsgProcessProviderPipe = "failed to initialize pipe" - - // DefaultDuration is the default amount of time in minutes that the - // credentials will be valid for. - DefaultDuration = time.Duration(15) * time.Minute - - // DefaultBufSize limits buffer size from growing to an enormous - // amount due to a faulty process. - DefaultBufSize = int(8 * sdkio.KibiByte) - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProcessProvider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type ProcessProvider struct { - staticCreds bool - credentials.Expiry - originalCommand []string - - // Expiry duration of the credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // A string representing an os command that should return a JSON with - // credential information. - command *exec.Cmd - - // MaxBufSize limits memory usage from growing to an enormous - // amount due to a faulty process. - MaxBufSize int - - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// ProcessProvider. The credentials will expire every 15 minutes by default. 
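A sketch of wiring up the credential_process provider documented above (not part of the patch); the command path and the timeout/buffer values are hypothetical:

```
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
)

func main() {
	// "/usr/local/bin/print-aws-creds" is a hypothetical credential_process
	// command that prints the version-1 JSON document on stdout.
	creds := processcreds.NewCredentials("/usr/local/bin/print-aws-creds",
		func(p *processcreds.ProcessProvider) {
			p.Timeout = 30 * time.Second // cap how long the process may run
			p.MaxBufSize = 4096          // cap stdout buffering
		},
	)

	if _, err := creds.Get(); err != nil {
		fmt.Println("credential_process failed:", err)
	}
}
```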
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: exec.Command(command), - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsTimeout returns a pointer to a new Credentials object with -// the specified command and timeout, and default duration and max buffer size. -func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { - p := NewCredentials(command, func(opt *ProcessProvider) { - opt.Timeout = timeout - }) - - return p -} - -// NewCredentialsCommand returns a pointer to a new Credentials object with -// the specified command, and default timeout, duration and max buffer size. -func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { - p := &ProcessProvider{ - command: command, - Duration: DefaultDuration, - Timeout: DefaultTimeout, - MaxBufSize: DefaultBufSize, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - SessionToken string - Expiration *time.Time -} - -// Retrieve executes the 'credential_process' and returns the credentials. -func (p *ProcessProvider) Retrieve() (credentials.Value, error) { - out, err := p.executeCredentialProcess() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // Serialize and validate response - resp := &credentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderParse, - fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), - err) - } - - if resp.Version != 1 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderVersion, - errMsgProcessProviderVersion, - nil) - } - - if len(resp.AccessKeyID) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissKey, - nil) - } - - if len(resp.SecretAccessKey) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New( - ErrCodeProcessProviderRequired, - errMsgProcessProviderMissSecret, - nil) - } - - // Handle expiration - p.staticCreds = resp.Expiration == nil - if resp.Expiration != nil { - p.SetExpiration(*resp.Expiration, p.ExpiryWindow) - } - - return credentials.Value{ - ProviderName: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - }, nil -} - -// IsExpired returns true if the credentials retrieved are expired, or not yet -// retrieved. -func (p *ProcessProvider) IsExpired() bool { - if p.staticCreds { - return false - } - return p.Expiry.IsExpired() -} - -// prepareCommand prepares the command to be executed. 
-func (p *ProcessProvider) prepareCommand() error { - - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(p.originalCommand) == 0 { - p.originalCommand = make([]string, len(p.command.Args)) - copy(p.originalCommand, p.command.Args) - - // check for empty command because it succeeds - if len(strings.TrimSpace(p.originalCommand[0])) < 1 { - return awserr.New( - ErrCodeProcessProviderExecution, - fmt.Sprintf( - "%s: %s", - errMsgProcessProviderPrepareCmd, - errMsgProcessProviderEmptyCmd), - nil) - } - } - - cmdArgs = append(cmdArgs, p.originalCommand...) - p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) - p.command.Env = os.Environ() - - return nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { - - if err := p.prepareCommand(); err != nil { - return nil, err - } - - // Setup the pipes - outReadPipe, outWritePipe, err := os.Pipe() - if err != nil { - return nil, awserr.New( - ErrCodeProcessProviderExecution, - errMsgProcessProviderPipe, - err) - } - - p.command.Stderr = os.Stderr // display stderr on console for MFA - p.command.Stdout = outWritePipe // get creds json on process's stdout - p.command.Stdin = os.Stdin // enable stdin for MFA - - output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) - - stdoutCh := make(chan error, 1) - go readInput( - io.LimitReader(outReadPipe, int64(p.MaxBufSize)), - output, - stdoutCh) - - execCh := make(chan error, 1) - go executeCommand(*p.command, execCh) - - finished := false - var errors []error - for !finished { - select { - case readError := <-stdoutCh: - errors = appendError(errors, readError) - finished = true - case execError := <-execCh: - err := outWritePipe.Close() - errors = appendError(errors, err) - errors = appendError(errors, execError) - if errors != nil { - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderProcess, - errors) - } - case <-time.After(p.Timeout): - finished = true - return output.Bytes(), awserr.NewBatchError( - ErrCodeProcessProviderExecution, - errMsgProcessProviderTimeout, - errors) // errors can be nil - } - } - - out := output.Bytes() - - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) - } - - return out, nil -} - -// appendError conveniently checks for nil before appending slice -func appendError(errors []error, err error) []error { - if err != nil { - return append(errors, err) - } - return errors -} - -func executeCommand(cmd exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} - -func readInput(r io.Reader, w io.Writer, read chan error) { - tee := io.TeeReader(r, w) - - _, err := ioutil.ReadAll(tee) - - if err == io.EOF { - err = nil - } - - read <- err // will only arrive here when write end of pipe is closed -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 22b5c5d9f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,151 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" - 
"github.com/aws/aws-sdk-go/internal/ini" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves access key pair (access key ID, -// secret access key, and session token if present) credentials from the current -// user's home directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.OpenFile(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - - iniProfile, ok := config.GetSection(profile) - if !ok { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) - } - - id := iniProfile.String("aws_access_key_id") - if len(id) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - nil) - } - - secret := iniProfile.String("aws_secret_access_key") - if len(secret) == 0 { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.String("aws_session_token") - - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go deleted file mode 100644 index 18c940ab3..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. -// -// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider -// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by -// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in -// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. 
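For the SharedCredentialsProvider removed above, a minimal sketch (illustrative, not part of the patch) that leans on the documented fallbacks for the filename and profile arguments:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty filename falls back to AWS_SHARED_CREDENTIALS_FILE and then the
	// default ~/.aws/credentials path; empty profile falls back to AWS_PROFILE
	// and then "default".
	creds := credentials.NewSharedCredentials("", "")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("shared credentials file not usable:", err)
		return
	}
	fmt.Println("credentials from", v.ProviderName) // SharedCredentialsProvider
}
```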
-// -// Loading AWS SSO credentials with the AWS shared configuration file -// -// You can use configure AWS SSO credentials from the AWS shared configuration file by -// providing the specifying the required keys in the profile: -// -// sso_account_id -// sso_region -// sso_role_name -// sso_start_url -// -// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target -// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be -// provided, or an error will be returned. -// -// [profile devsso] -// sso_start_url = https://my-sso-portal.awsapps.com/start -// sso_role_name = SSOReadOnlyRole -// sso_region = us-east-1 -// sso_account_id = 123456789012 -// -// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to -// retrieve credentials. For example: -// -// sess, err := session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// Profile: "devsso", -// }) -// if err != nil { -// return err -// } -// -// Programmatically loading AWS SSO credentials directly -// -// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information -// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache. -// -// svc := sso.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region -// }) -// -// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") -// -// credentials, err := provider.Get() -// if err != nil { -// return err -// } -// -// Additional Resources -// -// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -// -// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go deleted file mode 100644 index d4df39a7a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package ssocreds - -import "os" - -func getHomeDirectory() string { - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go deleted file mode 100644 index eb48f61e5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package ssocreds - -import "os" - -func getHomeDirectory() string { - return os.Getenv("USERPROFILE") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go deleted file mode 100644 index 6eda2a555..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go +++ /dev/null @@ -1,180 +0,0 @@ -package ssocreds - -import ( - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - 
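A sketch of the programmatic SSO flow described in the ssocreds documentation above (not part of the patch); the region, account ID, role name, and start URL are placeholders:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The session region must match the AWS SSO user portal region.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
	}))

	creds := ssocreds.NewCredentials(sess, "123456789012", "SSOReadOnlyRole",
		"https://my-sso-portal.awsapps.com/start")

	if _, err := creds.Get(); err != nil {
		fmt.Println("SSO token missing or expired; run `aws sso login`:", err)
	}
}
```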
"github.com/aws/aws-sdk-go/service/sso" - "github.com/aws/aws-sdk-go/service/sso/ssoiface" -) - -// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. -// To refresh the SSO session run aws sso login with the corresponding profile. -const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" - -const invalidTokenMessage = "the SSO session has expired or is invalid" - -func init() { - nowTime = time.Now - defaultCacheLocation = defaultCacheLocationImpl -} - -var nowTime func() time.Time - -// ProviderName is the name of the provider used to specify the source of credentials. -const ProviderName = "SSOProvider" - -var defaultCacheLocation func() string - -func defaultCacheLocationImpl() string { - return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") -} - -// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. -type Provider struct { - credentials.Expiry - - // The Client which is configured for the AWS Region where the AWS SSO user portal is located. - Client ssoiface.SSOAPI - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. - StartURL string -} - -// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured -// for the AWS Region where the AWS SSO user portal is located. -func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { - return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) -} - -// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured -// for the AWS Region where the AWS SSO user portal is located. -func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { - p := &Provider{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal -// by exchanging the accessToken present in ~/.aws/sso/cache. -func (p *Provider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal -// by exchanging the accessToken present in ~/.aws/sso/cache. 
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err - } - - output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, - AccountId: &p.AccountID, - RoleName: &p.RoleName, - }) - if err != nil { - return credentials.Value{}, err - } - - expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() - p.SetExpiration(expireTime, 0) - - return credentials.Value{ - AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), - ProviderName: ProviderName, - }, nil -} - -func getCacheFileName(url string) (string, error) { - hash := sha1.New() - _, err := hash.Write([]byte(url)) - if err != nil { - return "", err - } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil -} - -type token struct { - AccessToken string `json:"accessToken"` - ExpiresAt rfc3339 `json:"expiresAt"` - Region string `json:"region,omitempty"` - StartURL string `json:"startUrl,omitempty"` -} - -func (t token) Expired() bool { - return nowTime().Round(0).After(time.Time(t.ExpiresAt)) -} - -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - if len(t.AccessToken) == 0 { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) - } - - if t.Expired() { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) - } - - return t, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index cbba1e3d5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. 
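A sketch of the static provider documented above (illustrative only); the key material is a placeholder and the empty session token stands in for long-lived keys:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder key material; the session token may be "" when the keys
	// are not temporary STS credentials.
	creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretEXAMPLE", "")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("static credentials were empty:", err)
		return
	}
	fmt.Println("credentials from", v.ProviderName) // StaticProvider
}
```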
Token is only required -// for temporary security credentials retrieved via STS, otherwise an empty -// string can be passed for this parameter. -func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 260a37cbb..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Package stscreds are credential Providers to retrieve STS AWS credentials. - -STS provides multiple ways to retrieve credentials which can be used when making -future AWS service API operation calls. - -The SDK will ensure that per instance of credentials.Credentials all requests -to refresh the credentials will be synchronized. But, the SDK is unable to -ensure synchronous usage of the AssumeRoleProvider if the value is shared -between multiple Credentials, Sessions or service clients. - -Assume Role - -To assume an IAM role using STS with the SDK you can create a new Credentials -with the SDKs's stscreds package. - - // Initial credentials loaded from SDK's default credential chain. Such as - // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. - creds := stscreds.NewCredentials(sess, "myRoleArn") - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with static MFA Token - -To assume an IAM role with a MFA token you can either specify a MFA token code -directly or provide a function to prompt the user each time the credentials -need to refresh the role's credentials. Specifying the TokenCode should be used -for short lived operations that will not need to be refreshed, and when you do -not want to have direct control over the user provides their MFA token. - -With TokenCode the AssumeRoleProvider will be not be able to refresh the role's -credentials. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN using the MFA token code provided. 
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenCode = aws.String("00000000") - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -Assume Role with MFA Token Provider - -To assume an IAM role with MFA for longer running tasks where the credentials -may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -will allow the credential provider to prompt for new MFA token code when the -role's credentials need to be refreshed. - -The StdinTokenProvider function is available to prompt on stdin to retrieve -the MFA token code from the user. You can also implement custom prompts by -satisfing the TokenProvider function signature. - -Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -have undesirable results as the StdinTokenProvider will not be synchronized. A -single Credentials with an AssumeRoleProvider can be shared safely. - - // Create the credentials from AssumeRoleProvider to assume the role - // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. - creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { - p.SerialNumber = aws.String("myTokenSerialNumber") - p.TokenProvider = stscreds.StdinTokenProvider - }) - - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess, &aws.Config{Credentials: creds}) - -*/ -package stscreds - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" - "github.com/aws/aws-sdk-go/service/sts" -) - -// StdinTokenProvider will prompt on stderr and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function to read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -type assumeRolerWithContext interface { - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. 
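A sketch of assuming a role with an MFA token provider, per the stscreds package documentation above (not part of the patch); the role ARN and MFA serial number are placeholders:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// StdinTokenProvider prompts for a fresh MFA code whenever the
	// assumed-role credentials need refreshing.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/example-user")
			p.TokenProvider = stscreds.StdinTokenProvider
		},
	)

	if _, err := creds.Get(); err != nil {
		fmt.Println("assume role failed:", err)
	}
}
```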
-// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - credentials.Expiry - - // STS client to make assume role request with. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. - Tags []*sts.Tag - - // A list of keys for session tags that you want to set as transitive. - // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. - TransitiveTagKeys []*string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*sts.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). 
- SerialNumber *string - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. - // - // If SerialNumber is set and neither TokenCode nor TokenProvider are also - // set an error will be returned. - TokenCode *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is also set and - // TokenCode is not set. - // - // If both TokenCode and TokenProvider is set, TokenProvider will be used and - // TokenCode is ignored. - TokenProvider func() (string, error) - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // MaxJitterFrac reduces the effective Duration of each credential requested - // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must - // have a value between 0 and 1. Any other value may lead to expected behavior. - // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // - // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the - // AssumeRole call will be made with an arbitrary Duration between 27m and - // 30m. - // - // MaxJitterFrac should not be negative. - MaxJitterFrac float64 -} - -// NewCredentials returns a pointer to a new Credentials value wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. The -// Credentials value will attempt to refresh the credentials using the provider -// when Credentials.Get is called, if the cached credentials are expiring. -// -// Takes a Config provider to create the STS client. The ConfigProvider is -// satisfied by the session.Session type. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: sts.New(c), - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. The -// Credentials value will attempt to refresh the credentials using the provider -// when Credentials.Get is called, if the cached credentials are expiring. 
-// -// Takes an AssumeRoler which can be satisfied by the STS client. -// -// It is safe to share the returned Credentials with multiple Sessions and -// service clients. All access to the credentials and refreshing them -// will be synchronized. -func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { - p := &AssumeRoleProvider{ - Client: svc, - RoleARN: roleARN, - Duration: DefaultDuration, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - // Apply defaults where parameters are not set. - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. - p.Duration = DefaultDuration - } - jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, - Tags: p.Tags, - PolicyArns: p.PolicyArns, - TransitiveTagKeys: p.TransitiveTagKeys, - } - if p.Policy != nil { - input.Policy = p.Policy - } - if p.SerialNumber != nil { - if p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode - } else if p.TokenProvider != nil { - input.SerialNumber = p.SerialNumber - code, err := p.TokenProvider() - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - input.TokenCode = aws.String(code) - } else { - return credentials.Value{ProviderName: ProviderName}, - awserr.New("AssumeRoleTokenNotAvailable", - "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) - } - } - - var roleOutput *sts.AssumeRoleOutput - var err error - - if c, ok := p.Client.(assumeRolerWithContext); ok { - roleOutput, err = c.AssumeRoleWithContext(ctx, input) - } else { - roleOutput, err = p.Client.AssumeRole(input) - } - - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - // We will proactively generate new credentials before they expire. 
- p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyId, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - ProviderName: ProviderName, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index cefe2a76d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,154 +0,0 @@ -package stscreds - -import ( - "fmt" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -const ( - // ErrCodeWebIdentity will be used as an error code when constructing - // a new error to be returned during session creation or retrieval. - ErrCodeWebIdentity = "WebIdentityErr" - - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// now is used to return a time.Time object representing -// the current time. This can be used to easily test and -// compare test values. -var now = time.Now - -// TokenFetcher shuold return WebIdentity token bytes or an error -type TokenFetcher interface { - FetchToken(credentials.Context) ([]byte, error) -} - -// FetchTokenPath is a path to a WebIdentity token file -type FetchTokenPath string - -// FetchToken returns a token by reading from the filesystem -func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { - data, err := ioutil.ReadFile(string(f)) - if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", f) - return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) - } - return data, nil -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - credentials.Expiry - PolicyArns []*sts.PolicyDescriptorType - - // Duration the STS credentials will be valid for. Truncated to seconds. - // If unset, the assumed role will use AssumeRoleWithWebIdentity's default - // expiry duration. See - // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity - // for more information. - Duration time.Duration - - // The amount of time the credentials will be refreshed before they expire. - // This is useful refresh credentials before they expire to reduce risk of - // using credentials as they expire. If unset, will default to no expiry - // window. - ExpiryWindow time.Duration - - client stsiface.STSAPI - - tokenFetcher TokenFetcher - roleARN string - roleSessionName string -} - -// NewWebIdentityCredentials will return a new set of credentials with a given -// configuration, role arn, and token file path. 
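A sketch of the web identity flow documented below (illustrative only); the role ARN, session name, and token file path are placeholders, and on EKS the equivalent values are normally injected as environment variables:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Placeholder role ARN, session name, and OIDC token file path.
	creds := stscreds.NewWebIdentityCredentials(sess,
		"arn:aws:iam::123456789012:role/example-web-identity",
		"example-session",
		"/var/run/secrets/eks.amazonaws.com/serviceaccount/token")

	if _, err := creds.Get(); err != nil {
		fmt.Println("web identity assume-role failed:", err)
	}
}
```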
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { - svc := sts.New(c) - p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) - return credentials.NewCredentials(p) -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI -func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) -} - -// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the -// provided stsiface.STSAPI and a TokenFetcher -func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ - client: svc, - tokenFetcher: tokenFetcher, - roleARN: roleARN, - roleSessionName: roleSessionName, - } -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - return p.RetrieveWithContext(aws.BackgroundContext()) -} - -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - b, err := p.tokenFetcher.FetchToken(ctx) - if err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) - } - - sessionName := p.roleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(now().UnixNano(), 10) - } - - var duration *int64 - if p.Duration != 0 { - duration = aws.Int64(int64(p.Duration / time.Second)) - } - - req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.PolicyArns, - RoleArn: &p.roleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - DurationSeconds: duration, - }) - - req.SetContext(ctx) - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. 
- req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) - if err := req.Send(); err != nil { - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) - } - - p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) - - value := credentials.Value{ - AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), - SessionToken: aws.StringValue(resp.Credentials.SessionToken), - ProviderName: WebIdentityProviderName, - } - return value, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go deleted file mode 100644 index 25a66d1dd..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package csm provides the Client Side Monitoring (CSM) client which enables -// sending metrics via UDP connection to the CSM agent. This package provides -// control options, and configuration for the CSM client. The client can be -// controlled manually, or automatically via the SDK's Session configuration. -// -// Enabling CSM client via SDK's Session configuration -// -// The CSM client can be enabled automatically via SDK's Session configuration. -// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT -// environment variable is set to a non-empty value. -// -// The configuration options for the CSM client via the SDK's session -// configuration are: -// -// * AWS_CSM_PORT= -// The port number the CSM agent will receive metrics on. -// -// * AWS_CSM_HOST= -// The hostname, or IP address the CSM agent will receive metrics on. -// Without port number. -// -// Manually enabling the CSM client -// -// The CSM client can be started, paused, and resumed manually. The Start -// function will enable the CSM client to publish metrics to the CSM agent. It -// is safe to call Start concurrently, but if Start is called additional times -// with different ClientID or address it will panic. -// -// r, err := csm.Start("clientID", ":31000") -// if err != nil { -// panic(fmt.Errorf("failed starting CSM: %v", err)) -// } -// -// When controlling the CSM client manually, you must also inject its request -// handlers into the SDK's Session configuration for the SDK's API clients to -// publish metrics. -// -// sess, err := session.NewSession(&aws.Config{}) -// if err != nil { -// panic(fmt.Errorf("failed loading session: %v", err)) -// } -// -// // Add CSM client's metric publishing request handlers to the SDK's -// // Session Configuration. -// r.InjectHandlers(&sess.Handlers) -// -// Controlling CSM client -// -// Once the CSM client has been enabled the Get function will return a Reporter -// value that you can use to pause and resume the metrics published to the CSM -// agent. If Get function is called before the reporter is enabled with the -// Start function or via SDK's Session configuration nil will be returned. -// -// The Pause method can be called to stop the CSM client publishing metrics to -// the CSM agent. The Continue method will resume metric publishing. -// -// // Get the CSM client Reporter. 
-// r := csm.Get() -// -// // Will pause monitoring -// r.Pause() -// resp, err = client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -// -// // Resume monitoring -// r.Continue() -package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go deleted file mode 100644 index 4b19e2800..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ /dev/null @@ -1,89 +0,0 @@ -package csm - -import ( - "fmt" - "strings" - "sync" -) - -var ( - lock sync.Mutex -) - -const ( - // DefaultPort is used when no port is specified. - DefaultPort = "31000" - - // DefaultHost is the host that will be used when none is specified. - DefaultHost = "127.0.0.1" -) - -// AddressWithDefaults returns a CSM address built from the host and port -// values. If the host or port is not set, default values will be used -// instead. If host is "localhost" it will be replaced with "127.0.0.1". -func AddressWithDefaults(host, port string) string { - if len(host) == 0 || strings.EqualFold(host, "localhost") { - host = DefaultHost - } - - if len(port) == 0 { - port = DefaultPort - } - - // Only IP6 host can contain a colon - if strings.Contains(host, ":") { - return "[" + host + "]:" + port - } - - return host + ":" + port -} - -// Start will start a long running go routine to capture -// client side metrics. Calling start multiple time will only -// start the metric listener once and will panic if a different -// client ID or port is passed in. -// -// r, err := csm.Start("clientID", "127.0.0.1:31000") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// sess := session.NewSession() -// r.InjectHandlers(sess.Handlers) -// -// svc := s3.New(sess) -// out, err := svc.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) -func Start(clientID string, url string) (*Reporter, error) { - lock.Lock() - defer lock.Unlock() - - if sender == nil { - sender = newReporter(clientID, url) - } else { - if sender.clientID != clientID { - panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) - } - - if sender.url != url { - panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) - } - } - - if err := connect(url); err != nil { - sender = nil - return nil, err - } - - return sender, nil -} - -// Get will return a reporter if one exists, if one does not exist, nil will -// be returned. 
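The csm package doc above describes two ways to turn client-side monitoring on but only shows the manual Start path. Below is a hypothetical sketch of the environment-driven path plus AddressWithDefaults, under the assumption stated in that doc that the Session wiring keys off these variables; the exact enabling variable can differ between SDK revisions.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Per the csm package doc, a non-empty AWS_CSM_PORT lets the Session
	// enable the CSM client automatically; AWS_CSM_HOST is optional.
	os.Setenv("AWS_CSM_PORT", csm.DefaultPort)
	os.Setenv("AWS_CSM_HOST", csm.DefaultHost)

	sess := session.Must(session.NewSession())
	_ = sess // clients built from sess would now publish CSM metrics

	// AddressWithDefaults fills in the defaults and brackets IPv6 hosts.
	fmt.Println(csm.AddressWithDefaults("", ""))    // 127.0.0.1:31000
	fmt.Println(csm.AddressWithDefaults("::1", "")) // [::1]:31000
}
```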
-func Get() *Reporter { - lock.Lock() - defer lock.Unlock() - - return sender -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go deleted file mode 100644 index 5bacc791a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ /dev/null @@ -1,109 +0,0 @@ -package csm - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" -) - -type metricTime time.Time - -func (t metricTime) MarshalJSON() ([]byte, error) { - ns := time.Duration(time.Time(t).UnixNano()) - return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil -} - -type metric struct { - ClientID *string `json:"ClientId,omitempty"` - API *string `json:"Api,omitempty"` - Service *string `json:"Service,omitempty"` - Timestamp *metricTime `json:"Timestamp,omitempty"` - Type *string `json:"Type,omitempty"` - Version *int `json:"Version,omitempty"` - - AttemptCount *int `json:"AttemptCount,omitempty"` - Latency *int `json:"Latency,omitempty"` - - Fqdn *string `json:"Fqdn,omitempty"` - UserAgent *string `json:"UserAgent,omitempty"` - AttemptLatency *int `json:"AttemptLatency,omitempty"` - - SessionToken *string `json:"SessionToken,omitempty"` - Region *string `json:"Region,omitempty"` - AccessKey *string `json:"AccessKey,omitempty"` - HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` - XAmzID2 *string `json:"XAmzId2,omitempty"` - XAmzRequestID *string `json:"XAmznRequestId,omitempty"` - - AWSException *string `json:"AwsException,omitempty"` - AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` - SDKException *string `json:"SdkException,omitempty"` - SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` - - FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` - FinalAWSException *string `json:"FinalAwsException,omitempty"` - FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` - FinalSDKException *string `json:"FinalSdkException,omitempty"` - FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` - - DestinationIP *string `json:"DestinationIp,omitempty"` - ConnectionReused *int `json:"ConnectionReused,omitempty"` - - AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` - ConnectLatency *int `json:"ConnectLatency,omitempty"` - RequestLatency *int `json:"RequestLatency,omitempty"` - DNSLatency *int `json:"DnsLatency,omitempty"` - TCPLatency *int `json:"TcpLatency,omitempty"` - SSLLatency *int `json:"SslLatency,omitempty"` - - MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` -} - -func (m *metric) TruncateFields() { - m.ClientID = truncateString(m.ClientID, 255) - m.UserAgent = truncateString(m.UserAgent, 256) - - m.AWSException = truncateString(m.AWSException, 128) - m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) - - m.SDKException = truncateString(m.SDKException, 128) - m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) - - m.FinalAWSException = truncateString(m.FinalAWSException, 128) - m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) - - m.FinalSDKException = truncateString(m.FinalSDKException, 128) - m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) -} - -func truncateString(v *string, l int) *string { - if v != nil && len(*v) > l { - nv := (*v)[:l] - return &nv - } - - return v -} - -func (m *metric) SetException(e metricException) { - switch te := e.(type) { - case awsException: - m.AWSException = aws.String(te.exception) 
- m.AWSExceptionMessage = aws.String(te.message) - case sdkException: - m.SDKException = aws.String(te.exception) - m.SDKExceptionMessage = aws.String(te.message) - } -} - -func (m *metric) SetFinalException(e metricException) { - switch te := e.(type) { - case awsException: - m.FinalAWSException = aws.String(te.exception) - m.FinalAWSExceptionMessage = aws.String(te.message) - case sdkException: - m.FinalSDKException = aws.String(te.exception) - m.FinalSDKExceptionMessage = aws.String(te.message) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go deleted file mode 100644 index 82a3e345e..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ /dev/null @@ -1,55 +0,0 @@ -package csm - -import ( - "sync/atomic" -) - -const ( - runningEnum = iota - pausedEnum -) - -var ( - // MetricsChannelSize of metrics to hold in the channel - MetricsChannelSize = 100 -) - -type metricChan struct { - ch chan metric - paused *int64 -} - -func newMetricChan(size int) metricChan { - return metricChan{ - ch: make(chan metric, size), - paused: new(int64), - } -} - -func (ch *metricChan) Pause() { - atomic.StoreInt64(ch.paused, pausedEnum) -} - -func (ch *metricChan) Continue() { - atomic.StoreInt64(ch.paused, runningEnum) -} - -func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(ch.paused) - return v == pausedEnum -} - -// Push will push metrics to the metric channel if the channel -// is not paused -func (ch *metricChan) Push(m metric) bool { - if ch.IsPaused() { - return false - } - - select { - case ch.ch <- m: - return true - default: - return false - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go deleted file mode 100644 index 54a99280c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go +++ /dev/null @@ -1,26 +0,0 @@ -package csm - -type metricException interface { - Exception() string - Message() string -} - -type requestException struct { - exception string - message string -} - -func (e requestException) Exception() string { - return e.exception -} -func (e requestException) Message() string { - return e.message -} - -type awsException struct { - requestException -} - -type sdkException struct { - requestException -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go deleted file mode 100644 index 835bcd49c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ /dev/null @@ -1,264 +0,0 @@ -package csm - -import ( - "encoding/json" - "net" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Reporter will gather metrics of API requests made and -// send those metrics to the CSM endpoint. 
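metricChan above is a small pausable, bounded queue: Push never blocks, it only reports whether a metric was accepted. A short sketch of that behaviour follows; the type is unexported, so this would only compile inside package csm and is shown purely to illustrate the drop-rather-than-block semantics.

```go
package csm

// exampleMetricChan illustrates metricChan's semantics (illustrative only).
func exampleMetricChan() {
	ch := newMetricChan(2)

	accepted := ch.Push(metric{}) // true: buffered
	accepted = ch.Push(metric{})  // true: buffered
	accepted = ch.Push(metric{})  // false: buffer full, the metric is dropped

	ch.Pause()
	accepted = ch.Push(metric{}) // false: pushes are rejected while paused
	ch.Continue()
	_ = accepted
}
```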
-type Reporter struct { - clientID string - url string - conn net.Conn - metricsCh metricChan - done chan struct{} -} - -var ( - sender *Reporter -) - -func connect(url string) error { - const network = "udp" - if err := sender.connect(network, url); err != nil { - return err - } - - if sender.done == nil { - sender.done = make(chan struct{}) - go sender.start() - } - - return nil -} - -func newReporter(clientID, url string) *Reporter { - return &Reporter{ - clientID: clientID, - url: url, - metricsCh: newMetricChan(MetricsChannelSize), - } -} - -func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - creds, _ := r.Config.Credentials.Get() - - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Region: r.Config.Region, - Type: aws.String("ApiCallAttempt"), - Version: aws.Int(1), - - XAmzRequestID: aws.String(r.RequestID), - - AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), - AccessKey: aws.String(creds.AccessKeyID), - } - - if r.HTTPResponse != nil { - m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetException(getMetricException(awserr)) - } - } - - m.TruncateFields() - rep.metricsCh.Push(m) -} - -func getMetricException(err awserr.Error) metricException { - msg := err.Error() - code := err.Code() - - switch code { - case request.ErrCodeRequestError, - request.ErrCodeSerialization, - request.CanceledErrorCode: - return sdkException{ - requestException{exception: code, message: msg}, - } - default: - return awsException{ - requestException{exception: code, message: msg}, - } - } -} - -func (rep *Reporter) sendAPICallMetric(r *request.Request) { - if rep == nil { - return - } - - now := time.Now() - m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), - MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), - } - - if r.HTTPResponse != nil { - m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) - } - - if r.Error != nil { - if awserr, ok := r.Error.(awserr.Error); ok { - m.SetFinalException(getMetricException(awserr)) - } - } - - m.TruncateFields() - - // TODO: Probably want to figure something out for logging dropped - // metrics - rep.metricsCh.Push(m) -} - -func (rep *Reporter) connect(network, url string) error { - if rep.conn != nil { - rep.conn.Close() - } - - conn, err := net.Dial(network, url) - if err != nil { - return awserr.New("UDPError", "Could not connect", err) - } - - rep.conn = conn - - return nil -} - -func (rep *Reporter) close() { - if rep.done != nil { - close(rep.done) - } - - rep.metricsCh.Pause() -} - -func (rep *Reporter) start() { - defer func() { - rep.metricsCh.Pause() - }() - - for { - select { - case <-rep.done: - rep.done = nil - return - case m := <-rep.metricsCh.ch: - // TODO: What to do with this error? 
Probably should just log - b, err := json.Marshal(m) - if err != nil { - continue - } - - rep.conn.Write(b) - } - } -} - -// Pause will pause the metric channel preventing any new metrics from being -// added. It is safe to call concurrently with other calls to Pause, but if -// called concurently with Continue can lead to unexpected state. -func (rep *Reporter) Pause() { - lock.Lock() - defer lock.Unlock() - - if rep == nil { - return - } - - rep.close() -} - -// Continue will reopen the metric channel and allow for monitoring to be -// resumed. It is safe to call concurrently with other calls to Continue, but -// if called concurently with Pause can lead to unexpected state. -func (rep *Reporter) Continue() { - lock.Lock() - defer lock.Unlock() - if rep == nil { - return - } - - if !rep.metricsCh.IsPaused() { - return - } - - rep.metricsCh.Continue() -} - -// Client side metric handler names -const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" -) - -// InjectHandlers will will enable client side metrics and inject the proper -// handlers to handle how metrics are sent. -// -// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers -// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). -// -// // Start must be called in order to inject the correct handlers -// r, err := csm.Start("clientID", "127.0.0.1:8094") -// if err != nil { -// panic(fmt.Errorf("expected no error, but received %v", err)) -// } -// -// sess := session.NewSession() -// r.InjectHandlers(&sess.Handlers) -// -// // create a new service client with our client side metric session -// svc := s3.New(sess) -func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { - if rep == nil { - return - } - - handlers.Complete.PushFrontNamed(request.NamedHandler{ - Name: APICallMetricHandlerName, - Fn: rep.sendAPICallMetric, - }) - - handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ - Name: APICallAttemptMetricHandlerName, - Fn: rep.sendAPICallAttemptMetric, - }) -} - -// boolIntValue return 1 for true and 0 for false. -func boolIntValue(b bool) int { - if b { - return 1 - } - - return 0 -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 23bb639e0..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// A Defaults provides a collection of default values for SDK clients. 
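The defaults package doc above mentions resetting a session or service client back to SDK defaults before layering on custom settings. A minimal sketch of that use, assuming the exported Config and Handlers fields on session.Session (they are not part of this package):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Reset the session's config values and handler chain to the SDK
	// defaults, then apply application-specific settings on top.
	sess.Config.MergeIn(defaults.Config())
	sess.Handlers = defaults.Handlers()
}
```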
-type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithEndpointResolver(endpoints.DefaultResolver()) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: CredProviders(cfg, handlers), - }) -} - -// CredProviders returns the slice of providers used in -// the default credential chain. -// -// For applications that need to use some other provider (for example use -// different environment variables for legacy reasons) but still fall back -// on the default chain of providers. This allows that default chaint to be -// automatically updated -func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { - return []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - } -} - -const ( - httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" -) - -// RemoteCredProvider returns a credentials provider for the default remote -// endpoints such as EC2 or ECS Roles. 
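CredProviders above exists so an application can prepend its own provider while keeping the SDK's default fallback chain. A hedged sketch of that pattern; exampleProvider is hypothetical and stands in for whatever legacy credential source the application needs.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

// exampleProvider is a hypothetical stand-in for an application-specific
// credentials.Provider (for example, one reading legacy environment variables).
type exampleProvider struct{ credentials.Expiry }

func (p *exampleProvider) Retrieve() (credentials.Value, error) {
	return credentials.Value{ProviderName: "ExampleProvider"}, nil
}

func main() {
	def := defaults.Get()

	// Keep the SDK's default providers as a fallback behind the custom one.
	providers := append(
		[]credentials.Provider{&exampleProvider{}},
		defaults.CredProviders(def.Config, def.Handlers)...,
	)

	def.Config.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
		Providers: providers,
	})
}
```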
-func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { - return localHTTPCredProvider(cfg, handlers, u) - } - - if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) - return httpCredProvider(cfg, handlers, u) - } - - return ec2RoleProvider(cfg, handlers) -} - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - var errMsg string - - parsed, err := url.Parse(u) - if err != nil { - errMsg = fmt.Sprintf("invalid URL, %v", err) - } else { - host := aws.URLHostname(parsed) - if len(host) == 0 { - errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) - } - } - - if len(errMsg) > 0 { - if cfg.Logger != nil { - cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) - } - return credentials.ErrorProvider{ - Err: awserr.New("CredentialsEndpointError", errMsg, err), - ProviderName: endpointcreds.ProviderName, - } - } - - return httpCredProvider(cfg, handlers, u) -} - -func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { - return endpointcreds.NewProviderClient(cfg, handlers, u, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) - }, - ) -} - -func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - resolver := cfg.EndpointResolver - if resolver == nil { - resolver = endpoints.DefaultResolver() - } - - e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") - return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), - ExpiryWindow: 5 * time.Minute, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go deleted file mode 100644 index ca0ee1dcc..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go +++ /dev/null @@ -1,27 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return shareddefaults.SharedCredentialsFilename() -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. 
-// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return shareddefaults.SharedConfigFilename() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb61618..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 69fa63dc0..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,250 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// getToken uses the duration to return a token for EC2 metadata service, -// or an error if the request failed. 
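The "SDK Default HTTP Client" section of the aws package doc above recommends a dedicated client but gives no snippet. A minimal sketch, with an arbitrary timeout, of passing an explicit *http.Client through aws.Config:

```go
package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Share one explicitly-constructed client instead of mutating http.DefaultClient.
	httpClient := &http.Client{Timeout: 30 * time.Second}

	sess := session.Must(session.NewSession(&aws.Config{
		HTTPClient: httpClient,
	}))
	_ = sess
}
```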
-func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { - op := &request.Operation{ - Name: "GetToken", - HTTPMethod: "PUT", - HTTPPath: "/latest/api/token", - } - - var output tokenOutput - req := c.NewRequest(op, nil, &output) - req.SetContext(ctx) - - // remove the fetch token handler from the request handlers to avoid infinite recursion - req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) - - // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. - req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) - - ttl := strconv.FormatInt(int64(duration/time.Second), 10) - req.HTTPRequest.Header.Set(ttlHeader, ttl) - - err := req.Send() - - // Errors with bad request status should be returned. - if err != nil { - err = awserr.NewRequestFailure( - awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), - req.HTTPResponse.StatusCode, req.RequestID) - } - - return output, err -} - -// GetMetadata uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - return c.GetMetadataWithContext(aws.BackgroundContext(), p) -} - -// GetMetadataWithContext uses the path provided to request information from the EC2 -// instance metadata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), - } - output := &metadataOutput{} - - req := c.NewRequest(op, nil, output) - - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - return c.GetUserDataWithContext(aws.BackgroundContext()) -} - -// GetUserDataWithContext returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/latest/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) -} - -// GetDynamicDataWithContext uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. 
-func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.SetContext(ctx) - - err := req.Send() - return output.Content, err -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) -} - -// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - return c.IAMInfoWithContext(aws.BackgroundContext()) -} - -// IAMInfoWithContext retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { - resp, err := c.GetMetadataWithContext(ctx, "iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New(request.ErrCodeSerialization, - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - return c.RegionWithContext(aws.BackgroundContext()) -} - -// RegionWithContext returns the region the instance is running in. -func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) - if err != nil { - return "", err - } - // extract region from the ec2InstanceIdentityDocument - region := ec2InstanceIdentityDocument.Region - if len(region) == 0 { - return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) - } - // returns region - return region, nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
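Taken together, the accessors above were typically used as in the small sketch below, written against the removed v1 client with error handling trimmed to the essentials.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	meta := ec2metadata.New(sess)

	// Available is a cheap probe for "are we on EC2 with IMDS reachable?"
	if !meta.Available() {
		fmt.Println("not running on EC2, or IMDS is unreachable")
		return
	}

	region, err := meta.Region()
	if err != nil {
		fmt.Println("region lookup failed:", err)
		return
	}

	doc, err := meta.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("identity document lookup failed:", err)
		return
	}
	fmt.Println(region, doc.InstanceID, doc.AccountID)
}
```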
-func (c *EC2Metadata) Available() bool { - return c.AvailableWithContext(aws.BackgroundContext()) -} - -// AvailableWithContext returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { - if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index df63bade1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,245 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. The client cannot -// be used while the environment variable is set to true, (case insensitive). -// -// The endpoint of the EC2 IMDS client can be configured via the environment -// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a -// Session. See aws/session#Options.EC2IMDSEndpoint for more details. -package ec2metadata - -import ( - "bytes" - "io" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ServiceName is the name of the service. - ServiceName = "ec2metadata" - disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Headers for Token and TTL - ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" - tokenHeader = "x-aws-ec2-metadata-token" - - // Named Handler constants - fetchTokenHandlerName = "FetchTokenHandler" - unmarshalMetadataHandlerName = "unmarshalMetadataHandler" - unmarshalTokenHandlerName = "unmarshalTokenHandler" - enableTokenProviderHandlerName = "enableTokenProviderHandler" - - // TTL constants - defaultTTL = 21600 * time.Second - ttlExpirationWindow = 30 * time.Second -) - -// A EC2Metadata is an EC2 Metadata service Client. 
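The constants above (token TTL header, token header, 21600-second default TTL) describe the IMDSv2 handshake this client performs through its request handlers. A rough sketch of that exchange written directly against net/http so the flow is visible; the link-local address is the conventional IMDS endpoint and is an assumption here, not something defined in this file.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	const imds = "http://169.254.169.254" // conventional IMDS address (assumption)

	// Step 1: PUT a token request with the desired TTL.
	req, _ := http.NewRequest(http.MethodPut, imds+"/latest/api/token", nil)
	req.Header.Set("x-aws-ec2-metadata-token-ttl-seconds", "21600")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("token request failed:", err)
		return
	}
	token, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()

	// Step 2: present the token on subsequent metadata reads.
	req, _ = http.NewRequest(http.MethodGet, imds+"/latest/meta-data/instance-id", nil)
	req.Header.Set("x-aws-ec2-metadata-token", string(token))
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("metadata request failed:", err)
		return
	}
	defer resp.Body.Close()
	id, _ := ioutil.ReadAll(resp.Body)
	fmt.Println("instance-id:", string(id))
}
```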
-type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS -// client is able to communicate with the EC2 IMDS API. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 1 * time.Second, - } - // max number of retries on the client operation - cfg.MaxRetries = aws.Int(2) - } - - if u, err := url.Parse(endpoint); err == nil { - // Remove path from the endpoint since it will be added by requests. - // This is an artifact of the SDK adding `/latest` to the endpoint for - // EC2 IMDS, but this is now moved to the operation definition. - u.Path = "" - u.RawPath = "" - endpoint = u.String() - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - // token provider instance - tp := newTokenProvider(svc, defaultTTL) - - // NamedHandler for fetching token - svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ - Name: fetchTokenHandlerName, - Fn: tp.fetchTokenHandler, - }) - // NamedHandler for enabling token provider - svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ - Name: enableTokenProviderHandlerName, - Fn: tp.enableTokenProviderHandler, - }) - - svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This short-circuits the service's functionality to always fail to send - // requests. 
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.HTTPResponse = &http.Response{ - Header: http.Header{}, - } - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -type tokenOutput struct { - Token string - TTL time.Duration -} - -// unmarshal token handler is used to parse the response of a getToken operation -var unmarshalTokenHandler = request.NamedHandler{ - Name: unmarshalTokenHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - v := r.HTTPResponse.Header.Get(ttlHeader) - data, ok := r.Data.(*tokenOutput) - if !ok { - return - } - - data.Token = b.String() - // TTL is in seconds - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, - "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - t := time.Duration(i) * time.Second - data.TTL = t - }, -} - -var unmarshalHandler = request.NamedHandler{ - Name: unmarshalMetadataHandlerName, - Fn: func(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, - "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } - }, -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - var b bytes.Buffer - - if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), - r.HTTPResponse.StatusCode, r.RequestID) - return - } - - // Response body format is not consistent between metadata endpoints. 
- // Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure( - awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil), - r.HTTPResponse.StatusCode, r.RequestID) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go deleted file mode 100644 index 4b29f190b..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ /dev/null @@ -1,93 +0,0 @@ -package ec2metadata - -import ( - "net/http" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A tokenProvider struct provides access to EC2Metadata client -// and atomic instance of a token, along with configuredTTL for it. -// tokenProvider also provides an atomic flag to disable the -// fetch token operation. -// The disabled member will use 0 as false, and 1 as true. -type tokenProvider struct { - client *EC2Metadata - token atomic.Value - configuredTTL time.Duration - disabled uint32 -} - -// A ec2Token struct helps use of token in EC2 Metadata service ops -type ec2Token struct { - token string - credentials.Expiry -} - -// newTokenProvider provides a pointer to a tokenProvider instance -func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { - return &tokenProvider{client: c, configuredTTL: duration} -} - -// fetchTokenHandler fetches token for EC2Metadata service client by default. -func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { - return - } - - if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - return - } - - output, err := t.client.getToken(r.Context(), t.configuredTTL) - - if err != nil { - - // change the disabled flag on token provider to true, - // when error is request timeout error. - if requestFailureError, ok := err.(awserr.RequestFailure); ok { - switch requestFailureError.StatusCode() { - case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: - atomic.StoreUint32(&t.disabled, 1) - case http.StatusBadRequest: - r.Error = requestFailureError - } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } - } - return - } - - newToken := ec2Token{ - token: output.Token, - } - newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) - t.token.Store(newToken) - - // Inject token header to the request. 
- if ec2Token, ok := t.token.Load().(ec2Token); ok { - r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) - } -} - -// enableTokenProviderHandler enables the token provider -func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { - // If the error code status is 401, we enable the token provider - if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && - e.StatusCode() == http.StatusUnauthorized { - t.token.Store(ec2Token{}) - atomic.StoreUint32(&t.disabled, 0) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index 8d65ca1d6..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,193 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. - modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - const awsGlobal = "aws-global" - const usEast1 = "us-east-1" - - // If global endpoint already exists no customization needed. 
- if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { - return - } - - service.PartitionEndpoint = awsGlobal - if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { - service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} - } - service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: usEast1, - }, - } - - p.Services["s3"] = service -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - serviceDefault := s.Defaults[defaultKey{}] - if e, a := expectHostname, serviceDefault.Hostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - serviceDefault.Hostname = expectHostname + ".cn" - s.Defaults[defaultKey{}] = serviceDefault - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - serviceDefault := s.Defaults[defaultKey{}] - if a := serviceDefault.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := serviceDefault.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) - return - } - - serviceDefault.CredentialScope.Service = "application-autoscaling" - serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" - - if s.Defaults == nil { - s.Defaults = make(endpointDefaults) - } - - s.Defaults[defaultKey{}] = serviceDefault - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index b9f4a26de..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,28166 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. -) - -// AWS Standard partition's regions. -const ( - AfSouth1RegionID = "af-south-1" // Africa (Cape Town). - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). 
- CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). - EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). - EuSouth1RegionID = "eu-south-1" // Europe (Milan). - EuWest1RegionID = "eu-west-1" // Europe (Ireland). - EuWest2RegionID = "eu-west-2" // Europe (London). - EuWest3RegionID = "eu-west-3" // Europe (Paris). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. - UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. 
-func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "af-south-1": region{ - Description: "Africa (Cape Town)", - }, - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-northeast-3": region{ - Description: "Asia Pacific (Osaka)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "Europe (Frankfurt)", - }, - "eu-north-1": region{ - Description: "Europe (Stockholm)", - }, - "eu-south-1": region{ - Description: "Europe (Milan)", - }, - "eu-west-1": region{ - Description: "Europe (Ireland)", - }, - "eu-west-2": region{ - Description: "Europe (London)", - }, - "eu-west-3": region{ - Description: "Europe (Paris)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - }, - }, - }, - "account": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "account.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - 
}, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "acm-pca": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - 
endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - }, - }, - }, - "airflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplify": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplifybackend": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "api.detective": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.ecr": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "dkr-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-dkr-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.elastic-inference": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, - "api.fleethub.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "api.iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.mediatailor": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "api.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: 
"ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "app-integrations": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appmesh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "apprunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "appsync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "aps": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - }, - }, - }, - "auditmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"fips.batch.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - }, - }, - }, - "braket": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: 
endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "chime.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: 
"cloudcontrolapi-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", - }, - }, - }, - "clouddirectory": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudsearch": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - }, - }, - }, - "codeartifact": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", 
- }: endpoint{}, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: 
"us-east-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codeguru-reviewer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - }, - }, - }, - "codestar": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codestar-connections": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ 
- Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, 
- endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-sync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - }, - }, - }, - "comprehendmedical": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - }, - }, - }, - "compute-optimizer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "compute-optimizer.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "compute-optimizer.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "compute-optimizer.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "compute-optimizer.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "compute-optimizer.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "compute-optimizer.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "contact-lens": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cur": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, 
- endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "data.mediastore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dataexchange": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dax": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "devicefarm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - 
endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "discovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", 
- }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "drs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-west-1", - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: 
"dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.ap-south-1.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.eu-west-1.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.sa-east-1.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.us-east-1.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.us-east-2.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"ec2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "api.ec2.us-west-2.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.{region}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - }, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"elasticfilesystem-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, 
- endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: 
"elasticmapreduce-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elastictranscoder": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "email": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - 
}: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - }, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"es-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "es-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - 
Hostname: "events-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - }, - }, - }, - "finspace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "finspace-api": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - }, - 
endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecast": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecastquery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - }, - }, - }, - "frauddetector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "prod-ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - }, - }, - }, - "gamelift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"glacier-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - }, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - }, - }, - }, - "grafana": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "grafana.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "grafana.ap-northeast-2.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "grafana.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "grafana.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "grafana.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "grafana.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "grafana.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "grafana.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "grafana.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "grafana.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "groundstation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"groundstation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - 
Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "healthlake": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identity-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identitystore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: 
"IngestionService", - }, - }, - }, - }, - "inspector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "data.iotevents.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: 
"ap-southeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotthingsgraph": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ivs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - 
}: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kafkaconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kendra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - 
Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kinesisvideo": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - 
}: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "af-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-southeast-3-fips", - }: endpoint{ - Hostname: "kms-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3-fips", - }: endpoint{ - Hostname: "kms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"kms-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "me-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "kms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "kms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-west-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", - }, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lightsail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "lookoutequipment": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "lookoutvision": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: 
endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "machinelearning": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "macie": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", - }, - }, - }, - "macie2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "managedblockchain": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "mediaconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", - }, - }, - }, - "medialive": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mediapackage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - 
endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediapackage-vod": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mediastore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "messaging-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mgh": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mgn": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "migrationhub-strategy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "mobileanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "models-v2-lex": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "sandbox", - }: endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "rds.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: 
"rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - 
endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "networkmanager.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "nimble": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "opsworks-cm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: 
"aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - }, - }, - }, - "personalize": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - 
endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", 
- }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: 
endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "profile": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "projects.iot1click": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "api", - }: 
endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "rds-fips.ca-central-1", - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-east-1", - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-east-2", - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-west-1", - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-west-2", - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-1", - Variant: fipsVariant, - }: endpoint{ - 
Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - SSLCommonName: "{service}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: 
"redshift-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rekognition": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "rekognition-fips.ca-central-1", - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-east-1", - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-east-2", - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-west-1", - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-west-2", - }: endpoint{ - Hostname: 
"rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - }, - endpointKey{ 
- Region: "us-west-2-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "route53.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53-recovery-control-config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "route53domains": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "runtime-v2-lex": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "runtime.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - }, - 
endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - }, - 
endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "s3-external-1", - }: endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - 
endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: 
[]string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "s3-control.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - 
}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant 
| dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ 
- Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "schemas": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "sdb.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - 
endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: 
"me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog-appregistry": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: 
"servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", - }, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: 
endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "session.qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: 
endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", - }, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: 
"snowball-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", 
- Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", - }, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - 
Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - SSLCommonName: "queue.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: 
"ssm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ssm-incidents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", - }, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "storagegateway-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "support.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: 
"fips-us-east-1", - }: endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "textract-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "textract-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "textract-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "textract-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "textract-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-west-2.amazonaws.com", - }, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fips.transcribe.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"fips.transcribe.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "transcribestreaming-ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-ca-central-1", - }: endpoint{ - Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-east-1", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-east-2", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-fips-us-west-2", - }: endpoint{ - Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"transcribestreaming-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "transcribestreaming-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "transfer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"transfer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", - }, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "voiceid": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "wisdom": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "workdocs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "workmail": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", - }, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - 
Region: "fips-us-east-1", - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - }, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "account": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "account.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, 
- }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appmesh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "appsync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "batch": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "budgets.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "ce.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "compute-optimizer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "cur": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dax": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - 
Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - 
}, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "gamelift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "iotanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "rds.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "personalize": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", 
- }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "route53.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "route53resolver": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: 
"cn-northwest-1", - }: endpoint{}, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "sts": 
service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - endpointKey{ - Region: "fips-cn-north-1", - }: endpoint{ - Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-cn-northwest-1", - }: endpoint{ - Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US-West)", - }, - }, - Services: services{ - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "acm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "acm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "acm-pca": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", - }, - }, - }, - "api.detective": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.ecr": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dkr-us-gov-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-gov-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-gov-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-fips-secondary", - }: endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-secondary", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1-secondary", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "appstream2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: 
"athena-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "autoscaling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", - 
}, - }, - }, - "clouddirectory": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "cloudformation.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "cloudformation.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "cloudhsm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ 
- Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "comprehendmedical": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "config": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "config.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "ec2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "ec2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.{region}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "eks.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "eks.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "eks.us-gov-west-1.amazonaws.com", - }, - }, - }, - "elasticache": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - 
"elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: 
[]string{"https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - }, - "email": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "es-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fms-fips.us-gov-west-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-prod-us-gov-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-gov-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "glacier.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dataplane-us-gov-east-1", - }: endpoint{ - Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "dataplane-us-gov-west-1", - }: endpoint{ - Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "guardduty.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "health-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "health-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global-fips", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-govcloud-fips", - }: endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identitystore": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "identitystore.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "identitystore.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identitystore.us-gov-west-1.amazonaws.com", - }, - }, - }, - "inspector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "execute-api", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "iotsecuredtunneling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "kendra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "kinesis.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "kinesis.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lakeformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "logs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "logs.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - 
"mediaconvert": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "monitoring": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "monitoring.us-gov-west-1.amazonaws.com", - }, - }, - }, - "mq": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "mq-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "mq-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: 
"us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "networkmanager.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "api", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "ram.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "ram.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rds": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds.us-gov-east-1", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "redshift.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "redshift.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rekognition": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rekognition-fips.us-gov-west-1", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - 
}, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "resource-groups": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - }, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: 
"runtime-fips.lex.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "runtime.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: 
endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - 
}: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog-appregistry": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", - }, - }, - }, - "servicediscovery": service{ - Endpoints: 
serviceEndpoints{ - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - 
"snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sqs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sqs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sqs.us-gov-west-1.amazonaws.com", - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ssm": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - }, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - }, - 
endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - }, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sts": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - 
Deprecated: boxedTrue, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-us-gov-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"fips.transcribe.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). -func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - "us-iso-west-1": region{ - Description: "US ISO WEST", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - }, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ 
- Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - }, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: 
"us-iso-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "medialive": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "mediapackage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: 
"us-iso-east-1", - }: endpoint{}, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). -func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: 
serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "route53.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go deleted file mode 100644 index ca8fc828e..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go +++ /dev/null @@ -1,141 +0,0 @@ -package endpoints - -// Service identifiers -// -// Deprecated: Use client package's EndpointsID value instead of these -// ServiceIDs. These IDs are not maintained, and are out of date. -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. 
- ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AppsyncServiceID = "appsync" // Appsync. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - ChimeServiceID = "chime" // Chime. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. 
- MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - S3ControlServiceID = "s3-control" // S3Control. - SagemakerServiceID = "api.sagemaker" // Sagemaker. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TransferServiceID = "transfer" // Transfer. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 84316b92c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. 
e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index 880986157..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,706 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// A Logger is a minimalistic interface for the SDK to log messages to. -type Logger interface { - Log(...interface{}) -} - -// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution -// behavior. -type DualStackEndpointState uint - -const ( - // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint - // resolution. - DualStackEndpointStateUnset DualStackEndpointState = iota - - // DualStackEndpointStateEnabled enable dual-stack endpoint resolution for endpoints. - DualStackEndpointStateEnabled - - // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. - DualStackEndpointStateDisabled -) - -// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. -type FIPSEndpointState uint - -const ( - // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. 
- FIPSEndpointStateUnset FIPSEndpointState = iota - - // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. - FIPSEndpointStateEnabled - - // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. - FIPSEndpointStateDisabled -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - // - // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. - // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients - // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher - // precedence then this option. - UseDualStack bool - - // Sets the resolver to resolve a dual-stack endpoint for the service. - UseDualStackEndpoint DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint FIPSEndpointState - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - EC2MetadataEndpointMode EC2IMDSEndpointModeState - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Logger is the logger that will be used to log messages. 
- Logger Logger - - // Determines whether logging of deprecated endpoints usage is enabled. - LogDeprecated bool -} - -func (o Options) getEndpointVariant(service string) (v endpointVariant) { - const s3 = "s3" - const s3Control = "s3-control" - - if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) || - ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) { - v |= dualStackVariant - } - if o.UseFIPSEndpoint == FIPSEndpointStateEnabled { - v |= fipsVariant - } - return v -} - -// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. -type EC2IMDSEndpointModeState uint - -// Enumeration values for EC2IMDSEndpointModeState -const ( - EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota - EC2IMDSEndpointModeStateIPv4 - EC2IMDSEndpointModeStateIPv6 -) - -// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset -func (e *EC2IMDSEndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EC2IMDSEndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EC2IMDSEndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EC2IMDSEndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. -type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. - UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. 
- LegacyS3UsEast1Endpoint - - // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use regional endpoints. - RegionalS3UsEast1Endpoint -) - -// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the S3 regional Endpoint flag. -func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacyS3UsEast1Endpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalS3UsEast1Endpoint, nil - default: - return UnsetS3UsEast1Endpoint, - fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) - } -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -// -// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint. -// When DualStackEndpointState is set to a non-zero value it takes higher precedence then this option. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional -// option when resolving endpoints. -func UseDualStackEndpointOption(o *Options) { - o.UseDualStackEndpoint = DualStackEndpointStateEnabled -} - -// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional -// option when resolving endpoints. -func UseFIPSEndpointOption(o *Options) { - o.UseFIPSEndpoint = FIPSEndpointStateEnabled -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve -// STS endpoint to their regional endpoint, instead of the global endpoint. -func STSRegionalEndpointOption(o *Options) { - o.STSRegionalEndpoint = RegionalSTSEndpoint -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. 
-func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id, dnsSuffix string - p *partition -} - -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. 
-// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - // Since we have removed the customization that injected this into the model - // we still need to pretend that this is a modeled service. - if _, ok := ss[Ec2metadataServiceID]; !ok { - ss[Ec2metadataServiceID] = Service{ - id: Ec2metadataServiceID, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. 
-// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - - service, ok := s.p.Services[s.id] - - // Since ec2metadata customization has been removed we need to check - // if it was defined in non-standard endpoints.json file. If it's not - // then we can return the empty map as there is no regional-endpoints for IMDS. - // Otherwise, we iterate need to iterate the non-standard model. - if s.id == Ec2metadataServiceID && !ok { - return rs - } - - for id := range service.Endpoints { - if id.Variant != 0 { - continue - } - if r, ok := s.p.Regions[id.Region]; ok { - rs[id.Region] = Region{ - id: id.Region, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Endpoints() map[string]Endpoint { - es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) - for id := range s.p.Services[s.id].Endpoints { - if id.Variant != 0 { - continue - } - es[id.Region] = Endpoint{ - id: id.Region, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The endpoint partition - PartitionID string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. 
-type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. -type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go deleted file mode 100644 index df75e899a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package endpoints - -var legacyGlobalRegions = map[string]map[string]struct{}{ - "sts": { - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, - }, - "s3": { - "us-east-1": {}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index 89f6627dc..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,594 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "regexp" - "strconv" - "strings" -) - -const ( - ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest" - ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" -) - -const dnsSuffixTemplateKey = "{dnsSuffix}" - -// defaultKey is a compound map key of a variant and other values. 
-type defaultKey struct { - Variant endpointVariant - ServiceVariant serviceVariant -} - -// endpointKey is a compound map key of a region and associated variant value. -type endpointKey struct { - Region string - Variant endpointVariant -} - -// endpointVariant is a bit field to describe the endpoints attributes. -type endpointVariant uint64 - -// serviceVariant is a bit field to describe the service endpoint attributes. -type serviceVariant uint64 - -const ( - // fipsVariant indicates that the endpoint is FIPS capable. - fipsVariant endpointVariant = 1 << (64 - 1 - iota) - - // dualStackVariant indicates that the endpoint is DualStack capable. - dualStackVariant -) - -var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type endpointWithVariants struct { - endpoint - Variants []endpointWithTags `json:"variants"` -} - -type endpointWithTags struct { - endpoint - Tags []string `json:"tags"` -} - -type endpointDefaults map[defaultKey]endpoint - -func (p *endpointDefaults) UnmarshalJSON(data []byte) error { - if *p == nil { - *p = make(endpointDefaults) - } - - var e endpointWithVariants - if err := json.Unmarshal(data, &e); err != nil { - return err - } - - (*p)[defaultKey{Variant: 0}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*p)[defaultKey{Variant: endpointVariant}] = ve - } - - return nil -} - -func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { - if len(tags) == 0 { - unknown = true - return - } - - for _, tag := range tags { - switch { - case strings.EqualFold("fips", tag): - ev |= fipsVariant - case strings.EqualFold("dualstack", tag): - ev |= dualStackVariant - default: - unknown = true - } - } - return ev, unknown -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpointDefaults `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, options Options) bool { - s, hasService := 
p.Services[service] - _, hasEndpoint := s.Endpoints[endpointKey{ - Region: region, - Variant: options.getEndpointVariant(service), - }] - - if hasEndpoint && hasService { - return true - } - - if options.StrictMatching { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) - - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - s, hasService := p.Services[service] - - if service == Ec2metadataServiceID && !hasService { - endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) - return endpoint, nil - } - - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if r, ok := isLegacyGlobalRegion(service, region, opt); ok { - region = r - } - - variant := opt.getEndpointVariant(service) - - endpoints := s.Endpoints - - serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] - // If we searched for a variant which may have no explicit service defaults, - // then we need to inherit the standard service defaults except the hostname and dnsSuffix - if variant != 0 && !hasServiceDefault { - serviceDefaults = s.Defaults[defaultKey{}] - serviceDefaults.Hostname = "" - serviceDefaults.DNSSuffix = "" - } - - partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] - - var dnsSuffix string - if len(serviceDefaults.DNSSuffix) > 0 { - dnsSuffix = serviceDefaults.DNSSuffix - } else if variant == 0 { - // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for - // a non-variant endpoint then we need to set the dnsSuffix. 
- dnsSuffix = p.DNSSuffix - } - - noDefaults := !hasServiceDefault && !hasPartitionDefault - - e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) - if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) - } - - defs := []endpoint{partitionDefaults, serviceDefaults} - - return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) -} - -func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { - switch mode { - case EC2IMDSEndpointModeStateIPv6: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv6, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - case EC2IMDSEndpointModeStateIPv4: - fallthrough - default: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv4, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - } -} - -func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { - if opt.getEndpointVariant(service) != 0 { - return "", false - } - - const ( - sts = "sts" - s3 = "s3" - awsGlobal = "aws-global" - ) - - switch { - case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: - return region, false - case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: - return region, false - default: - if _, ok := legacyGlobalRegions[service][region]; ok { - return awsGlobal, true - } - } - - return region, false -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es serviceEndpoints, variant endpointVariant) []string { - list := make([]string, 0, len(es)) - for k := range es { - if k.Variant != variant { - continue - } - list = append(list, k.Region) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpointDefaults `json:"defaults"` - Endpoints serviceEndpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { - if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { - return e, true - } - - if s.IsRegionalized == boxedFalse { - return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. 
- return endpoint{}, false -} - -type serviceEndpoints map[endpointKey]endpoint - -func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { - if *s == nil { - *s = make(serviceEndpoints) - } - - var regionToEndpoint map[string]endpointWithVariants - - if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { - return err - } - - for region, e := range regionToEndpoint { - (*s)[endpointKey{Region: region}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve - } - } - - return nil -} - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - DNSSuffix string `json:"dnsSuffix"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` - - Deprecated boxedBool `json:"deprecated"` -} - -// isZero returns whether the endpoint structure is an empty (zero) value. -func (e endpoint) isZero() bool { - switch { - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (credentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - case len(e.SSLCommonName) != 0: - return false - } - return true -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - - if !validateInputRegion(region) { - return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") - } - - if len(merged.DNSSuffix) > 0 { - dnsSuffix = merged.DNSSuffix - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { - opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) - } - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if 
disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if len(other.DNSSuffix) > 0 { - e.DNSSuffix = other.DNSSuffix - } - if other.Deprecated != boxedBoolUnset { - e.Deprecated = other.Deprecated - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) - -func validateInputRegion(region string) bool { - return regionValidationRegex.MatchString(region) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 84922bca8..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,412 +0,0 @@ -//go:build codegen -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) 
- - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -func endpointVariantSetter(variant endpointVariant) (string, error) { - if variant == 0 { - return "0", nil - } - - if variant > (fipsVariant | dualStackVariant) { - return "", fmt.Errorf("unknown endpoint variant") - } - - var symbols []string - if variant&fipsVariant != 0 { - symbols = append(symbols, "fipsVariant") - } - if variant&dualStackVariant != 0 { - symbols = append(symbols, "dualStackVariant") - } - v := strings.Join(symbols, "|") - - return v, nil -} - -func endpointKeySetter(e endpointKey) (string, error) { - var sb strings.Builder - sb.WriteString("endpointKey{\n") - sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -func defaultKeySetter(e defaultKey) (string, error) { - var sb strings.Builder - sb.WriteString("defaultKey{\n") - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -var funcMap = 
template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, - "EndpointVariantSetter": endpointVariantSetter, - "EndpointKeySetter": endpointKeySetter, - "DefaultKeySetter": defaultKeySetter, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . 
-}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Defaults" -}} -endpointDefaults{ - {{ range $id, $endpoint := . -}} - {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -serviceEndpoints{ - {{ range $id, $endpoint := . -}} - {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index fa06f7a8f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. 
- ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f277a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 49674cc79..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,121 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. -func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. 
- LogDebugWithRequestErrors - - // LogDebugWithEventStreamBody states the SDK should log EventStream - // request and response bodys. This should be used to log the EventStream - // wire unmarshaled message content of requests and responses made while - // using the SDK Will also enable LogDebug. - LogDebugWithEventStreamBody - - // LogDebugWithDeprecated states the SDK should log details about deprecated functionality. - LogDebugWithDeprecated -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index 2ba3c56c1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,19 +0,0 @@ -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - if strings.Contains(err.Error(), "read: connection reset") { - return false - } - - if strings.Contains(err.Error(), "use of closed network connection") || - strings.Contains(err.Error(), "connection reset") || - strings.Contains(err.Error(), "broken pipe") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index e819ab6c0..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,343 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - BuildStream HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalStream HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - CompleteAttempt HandlerList - Complete HandlerList -} - -// Copy returns a copy of this handler's lists. 
-func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - BuildStream: h.BuildStream.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalStream: h.UnmarshalStream.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - CompleteAttempt: h.CompleteAttempt.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers. -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.BuildStream.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalStream.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.CompleteAttempt.Clear() - h.Complete.Clear() -} - -// IsEmpty returns if there are no handlers in any of the handlerlists. -func (h *Handlers) IsEmpty() bool { - if h.Validate.Len() != 0 { - return false - } - if h.Build.Len() != 0 { - return false - } - if h.BuildStream.Len() != 0 { - return false - } - if h.Send.Len() != 0 { - return false - } - if h.Sign.Len() != 0 { - return false - } - if h.Unmarshal.Len() != 0 { - return false - } - if h.UnmarshalStream.Len() != 0 { - return false - } - if h.UnmarshalMeta.Len() != 0 { - return false - } - if h.UnmarshalError.Len() != 0 { - return false - } - if h.ValidateResponse.Len() != 0 { - return false - } - if h.Retry.Len() != 0 { - return false - } - if h.AfterRetry.Len() != 0 { - return false - } - if h.CompleteAttempt.Len() != 0 { - return false - } - if h.Complete.Len() != 0 { - return false - } - - return true -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. 
-func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. -func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. -func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. 
-func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} - -// WithSetRequestHeaders updates the operation request's HTTP header to contain -// the header key value pairs provided. If the header key already exists in the -// request's HTTP header set, the existing value(s) will be replaced. -func WithSetRequestHeaders(h map[string]string) Option { - return withRequestHeader(h).SetRequestHeaders -} - -type withRequestHeader map[string]string - -func (h withRequestHeader) SetRequestHeaders(r *Request) { - for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f79602b..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index 9370fa50c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { - reader := &offsetReader{} - _, err := buf.Seek(offset, sdkio.SeekStart) - if err != nil { - return nil, err - } - - reader.buf = buf - return reader, nil -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. 
-func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { - if err := o.Close(); err != nil { - return nil, err - } - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index fb0a68fce..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,713 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. - ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" - - // ErrCodeRequestError is an error preventing the SDK from continuing to - // process the request. - ErrCodeRequestError = "RequestError" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - streamingBody io.ReadCloser - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // Additional API error codes that should be retried. IsErrorRetryable - // will consider these codes in addition to its built in cases. - RetryErrorCodes []string - - // Additional API error codes that should be retried with throttle backoff - // delay. IsErrorThrottle will consider these codes in addition to its - // built in cases. - ThrottleErrorCodes []string - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. 
- ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API operation and -// parameters. -// -// A Retryer should be provided to direct how the request is retried. If -// Retryer is nil, a default no retry value will be used. You can use -// NoOpRetryer in the Client package to disable retry behavior directly. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - if retryer == nil { - retryer = noOpRetryer{} - } - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - if len(operation.HTTPPath) != 0 { - opHTTPPath := operation.HTTPPath - var opQueryString string - if idx := strings.Index(opHTTPPath, "?"); idx >= 0 { - opQueryString = opHTTPPath[idx+1:] - opHTTPPath = opHTTPPath[:idx] - } - - if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") { - opHTTPPath = opHTTPPath[1:] - } - httpReq.URL.Path += opHTTPPath - httpReq.URL.RawQuery = opQueryString - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. -// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. 
-// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. If Request does not have a -// context aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -func fmtAttemptCount(retryCount, maxRetries int) string { - return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. 
-func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - - if aws.IsReaderSeekable(reader) { - var err error - // Get the Bodies current offset so retries will start from the same - // initial position. - r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to determine start of request body", err) - return - } - } - r.ResetBody() -} - -// SetStreamingBody set the reader to be used for the request that will stream -// bytes to the server. Request's Body must not be set to any reader. -func (r *Request) SetStreamingBody(reader io.ReadCloser) { - r.streamingBody = reader - r.SetReaderBody(aws.ReadSeekCloser(reader)) -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. The expire parameter is only used for presigned Amazon -// S3 API requests. All other AWS services will use a fixed expiration -// time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. The expire parameter is only used for -// presigned Amazon S3 API requests. All other AWS services will use a fixed -// expiration time of 15 minutes. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -const ( - notRetrying = "not retrying" -) - -func debugLogReqError(r *Request, stage, retryStr string, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. 
-// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", notRetrying, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", notRetrying, r.Error) - return r.Error - } - - SanitizeHostForHeader(r.HTTPRequest) - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { - if r.streamingBody != nil { - return r.streamingBody, nil - } - - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to get next request body reader", err) - } - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, - "failed to compute request body size", err) - } - - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. 
If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. - r.Handlers.Complete.Run(r) - }() - - if err := r.Error; err != nil { - return err - } - - for { - r.Error = nil - r.AttemptTime = time.Now() - - if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", notRetrying, err) - return err - } - - if err := r.sendRequest(); err == nil { - return nil - } - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - } -} - -func (r *Request) prepareRetry() error { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - if err := r.Error; err != nil { - return awserr.New(ErrCodeSerialization, - "failed to prepare body for retry", err) - - } - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - - return nil -} - -func (r *Request) sendRequest() (sendErr error) { - defer r.Handlers.CompleteAttempt.Run(r) - - r.Retryable = nil - r.Handlers.Send.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", - fmtAttemptCount(r.RetryCount, r.MaxRetries()), - r.Error) - return r.Error - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. 
-func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - if r.URL == nil { - return "" - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index 5921b8ff2..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index ea643c9c4..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -package request - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = awserr.New(ErrCodeSerialization, - "failed to reset request body", err) - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index d8c505302..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 49a243ef2..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. 
-func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 64784e16f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,266 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// for p.Next() { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// // ... -// // break out of loop to stop fetching additional pages -// } -// -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. - // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. 
-func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if !v { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 752ae47f8..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,309 +0,0 @@ -package request - -import ( - "net" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer provides the interface drive the SDK's request retry behavior. The -// Retryer implementation is responsible for implementing exponential backoff, -// and determine if a request API error should be retried. -// -// client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. -type Retryer interface { - // RetryRules return the retry delay that should be used by the SDK before - // making another request attempt for the failed request. - RetryRules(*Request) time.Duration - - // ShouldRetry returns if the failed request is retryable. - // - // Implementations may consider request attempt count when determining if a - // request is retryable, but the SDK will use MaxRetries to limit the - // number of attempts a request are made. - ShouldRetry(*Request) bool - - // MaxRetries is the number of times a request may be retried before - // failing. - MaxRetries() int -} - -// WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. The value must not be nil. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - if retryer == nil { - if cfg.Logger != nil { - cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. 
Replacing with retry disabled Retryer.") - } - retryer = noOpRetryer{} - } - cfg.Retryer = retryer - return cfg - -} - -// noOpRetryer is a internal no op retryer used when a request is created -// without a retryer. -// -// Provides a retryer that performs no retries. -// It should be used when we do not want retries to be performed. -type noOpRetryer struct{} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API; For NoOpRetryer the MaxRetries will always be zero. -func (d noOpRetryer) MaxRetries() int { - return 0 -} - -// ShouldRetry will always return false for NoOpRetryer, as it should never retry. -func (d noOpRetryer) ShouldRetry(_ *Request) bool { - return false -} - -// RetryRules returns the delay duration before retrying this request again; -// since NoOpRetryer does not retry, RetryRules always returns 0. -func (d noOpRetryer) RetryRules(_ *Request) time.Duration { - return 0 -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - ErrCodeRequestError: {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 - "TransactionInProgressException": {}, - "EC2ThrottledException": {}, // EC2 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporary); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. 
-func IsErrorRetryable(err error) bool { - if err == nil { - return false - } - return shouldRetryError(err) -} - -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - if isNestedErrorRetryable(err) { - return true - } - - origErr := err.OrigErr() - var shouldRetry bool - if origErr != nil { - shouldRetry = shouldRetryError(origErr) - if err.Code() == ErrCodeRequestError && !shouldRetry { - return false - } - } - if isCodeRetryable(err.Code()) { - return true - } - return shouldRetry - - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. - return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeThrottle(aerr.Code()) - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry -// error. Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - return isCodeExpiredCreds(aerr.Code()) - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - if isErrCode(r.Error, r.RetryErrorCodes) { - return true - } - - // HTTP response status code 501 should not be retried. - // 501 represents Not Implemented which means the request method is not - // supported by the server and cannot be handled. - if r.HTTPResponse != nil { - // HTTP response status code 500 represents internal server error and - // should be retried without any throttle. - if r.HTTPResponse.StatusCode == 500 { - return true - } - } - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its -// code. Returns false if the request has no Error set. 
-// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - if isErrCode(r.Error, r.ThrottleErrorCodes) { - return true - } - - if r.HTTPResponse != nil { - switch r.HTTPResponse.StatusCode { - case - 429, // error caused due to too many requests - 502, // Bad Gateway error should be throttled - 503, // caused when service is unavailable - 504: // error occurred due to gateway timeout - return true - } - } - - return IsErrorThrottle(r.Error) -} - -func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok && aerr != nil { - for _, code := range codes { - if code == aerr.Code() { - return true - } - } - } - - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb98..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. -func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. 
- r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 8630683f3..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,286 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" - // ParamMaxLenErrCode is the error code for value being too long. - ParamMaxLenErrCode = "ParamMaxLenError" - - // ParamFormatErrCode is the error code for a field with invalid - // format or characters. - ParamFormatErrCode = "ParamFormatInvalidError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. 
-func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. 
-type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} - -// An ErrParamMaxLen represents a maximum length parameter error. -type ErrParamMaxLen struct { - errInvalidParam - max int -} - -// NewErrParamMaxLen creates a new maximum length parameter error. -func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { - return &ErrParamMaxLen{ - errInvalidParam: errInvalidParam{ - code: ParamMaxLenErrCode, - field: field, - msg: fmt.Sprintf("maximum size of %v, %v", max, value), - }, - max: max, - } -} - -// MaxLen returns the field's required minimum length. -func (e *ErrParamMaxLen) MaxLen() int { - return e.max -} - -// An ErrParamFormat represents a invalid format parameter error. -type ErrParamFormat struct { - errInvalidParam - format string -} - -// NewErrParamFormat creates a new invalid format parameter error. -func NewErrParamFormat(field string, format, value string) *ErrParamFormat { - return &ErrParamFormat{ - errInvalidParam: errInvalidParam{ - code: ParamFormatErrCode, - field: field, - msg: fmt.Sprintf("format %v, %v", format, value), - }, - format: format, - } -} - -// Format returns the field's required format. -func (e *ErrParamFormat) Format() string { - return e.format -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f883c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. 
-func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. -func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. -func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. 
-// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. -// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. - if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. 
- return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go deleted file mode 100644 index 3efdac29f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ /dev/null @@ -1,290 +0,0 @@ -package session - -import ( - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -func resolveCredentials(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (*credentials.Credentials, error) { - - switch { - case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration - // so load that profile from shared config first. - // Github(aws/aws-sdk-go#2727) - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - - case envCfg.Creds.HasKeys(): - // Environment credentials - return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil - - case len(envCfg.WebIdentityTokenFilePath) != 0: - // Web identity token from environment, RoleARN required to also be - // set. - return assumeWebIdentity(cfg, handlers, - envCfg.WebIdentityTokenFilePath, - envCfg.RoleARN, - envCfg.RoleSessionName, - ) - - default: - // Fallback to the "default" credential resolution chain. - return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) - } -} - -// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_ROLE_ARN' was not set. -var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) - -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but -// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
-var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) - -func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, - filepath string, - roleARN, sessionName string, -) (*credentials.Credentials, error) { - - if len(filepath) == 0 { - return nil, WebIdentityEmptyTokenFilePathErr - } - - if len(roleARN) == 0 { - return nil, WebIdentityEmptyRoleARNErr - } - - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) - - return creds, nil -} - -func resolveCredsFromProfile(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch { - case sharedCfg.SourceProfile != nil: - // Assume IAM role with credentials source from a different profile. - creds, err = resolveCredsFromProfile(cfg, envCfg, - *sharedCfg.SourceProfile, handlers, sessOpts, - ) - - case sharedCfg.Creds.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - creds = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - - case len(sharedCfg.CredentialSource) != 0: - creds, err = resolveCredsFromSource(cfg, envCfg, - sharedCfg, handlers, sessOpts, - ) - - case len(sharedCfg.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. - return assumeWebIdentity(cfg, handlers, - sharedCfg.WebIdentityTokenFile, - sharedCfg.RoleARN, - sharedCfg.RoleSessionName, - ) - - case sharedCfg.hasSSOConfiguration(): - creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) - - case len(sharedCfg.CredentialProcess) != 0: - // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) - - default: - // Fallback to default credentials provider, include mock errors for - // the credential chain so user can identify why credentials failed to - // be retrieved. 
- creds = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{ - Err: awserr.New("EnvAccessKeyNotFound", - "failed to find credentials in the environment.", nil), - }, - &credProviderError{ - Err: awserr.New("SharedCredsLoad", - fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), - }, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) - } - if err != nil { - return nil, err - } - - if len(sharedCfg.RoleARN) > 0 { - cfgCp := *cfg - cfgCp.Credentials = creds - return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) - } - - return creds, nil -} - -func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) { - if err := sharedCfg.validateSSOConfiguration(); err != nil { - return nil, err - } - - cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion - - return ssocreds.NewCredentials( - &Session{ - Config: cfgCopy, - Handlers: handlers.Copy(), - }, - sharedCfg.SSOAccountID, - sharedCfg.SSORoleName, - sharedCfg.SSOStartURL, - ), nil -} - -// valid credential source values -const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" -) - -func resolveCredsFromSource(cfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) (creds *credentials.Credentials, err error) { - - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - case credSourceEnvironment: - creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) - - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return nil, ErrSharedConfigECSContainerEnvVarEmpty - } - - p := defaults.RemoteCredProvider(*cfg, handlers) - creds = credentials.NewCredentials(p) - - default: - return nil, ErrSharedConfigInvalidCredSource - } - - return creds, nil -} - -func credsFromAssumeRole(cfg aws.Config, - handlers request.Handlers, - sharedCfg sharedConfig, - sessOpts Options, -) (*credentials.Credentials, error) { - - if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return nil, AssumeRoleTokenProviderNotSetError{} - } - - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.RoleSessionName - - if sessOpts.AssumeRoleDuration == 0 && - sharedCfg.AssumeRoleDuration != nil && - *sharedCfg.AssumeRoleDuration/time.Minute > 15 { - opt.Duration = *sharedCfg.AssumeRoleDuration - } else if sessOpts.AssumeRoleDuration != 0 { - opt.Duration = sessOpts.AssumeRoleDuration - } - - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ), nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. 
-type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go deleted file mode 100644 index 4390ad52f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build go1.13 -// +build go1.13 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go deleted file mode 100644 index 668565bea..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !go1.13 && go1.7 -// +build !go1.13,go1.7 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go deleted file mode 100644 index e101aa6b6..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build !go1.6 && go1.5 -// +build !go1.6,go1.5 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. 
-func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go deleted file mode 100644 index b5fcbe0d1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !go1.7 && go1.6 -// +build !go1.7,go1.6 - -package session - -import ( - "net" - "net/http" - "time" -) - -// Transport that should be used when a custom CA bundle is specified with the -// SDK. -func getCustomTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go deleted file mode 100644 index ff3cc012a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Package session provides configuration for the SDK's service clients. Sessions -can be shared across service clients that share the same base configuration. - -Sessions are safe to use concurrently as long as the Session is not being -modified. Sessions should be cached when possible, because creating a new -Session will load all configuration values from the environment, and config -files each time the Session is created. Sharing the Session value across all of -your service clients will ensure the configuration is loaded the fewest number -of times possible. - -Sessions options from Shared Config - -By default NewSession will only load credentials from the shared credentials -file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is -set to a truthy value the Session will be created from the configuration -values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. Using the NewSessionWithOptions with -SharedConfigState set to SharedConfigEnable will create the session as if the -AWS_SDK_LOAD_CONFIG environment variable was set. - -Credential and config loading order - -The Session will attempt to load configuration and credentials from the -environment, configuration files, and other credential sources. The order -configuration is loaded in is: - - * Environment Variables - * Shared Credentials file - * Shared Configuration file (if SharedConfig is enabled) - * EC2 Instance Metadata (credentials only) - -The Environment variables for credentials will have precedence over shared -config even if SharedConfig is enabled. To override this behavior, and use -shared config credentials instead specify the session.Options.Profile, (e.g. -when using credential_source=Environment to assume a role). - - sess, err := session.NewSessionWithOptions(session.Options{ - Profile: "myProfile", - }) - -Creating Sessions - -Creating a Session without additional options will load credentials region, and -profile loaded from the environment and shared config automatically. See, -"Environment Variables" section for information on environment variables used -by Session. 
- - // Create Session - sess, err := session.NewSession() - - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded, config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. - - // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-west-2"), - }) - -Use NewSessionWithOptions to provide additional configuration driving how the -Session's configuration will be loaded. Such as, specifying shared config -profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). - - // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{ - // Options - }) - - sess, err := session.NewSessionWithOptions(session.Options{ - // Specify profile to load for the session's config - Profile: "profile_name", - - // Provide SDK Config options, such as Region. - Config: aws.Config{ - Region: aws.String("us-west-2"), - }, - - // Force enable Shared Config support - SharedConfigState: session.SharedConfigEnable, - }) - -Adding Handlers - -You can add handlers to a session to decorate API operation, (e.g. adding HTTP -headers). All clients that use the Session receive a copy of the Session's -handlers. For example, the following request handler added to the Session logs -every requests made. - - // Create a session, and add additional handlers for all service - // clients created with the Session to inherit. Adds logging handler. - sess := session.Must(session.NewSession()) - - sess.Handlers.Send.PushFront(func(r *request.Request) { - // Log every request made and its payload - logger.Printf("Request: %s/%s, Params: %s", - r.ClientInfo.ServiceName, r.Operation, r.Params) - }) - -Shared Config Fields - -By default the SDK will only load the shared credentials file's -(~/.aws/credentials) credentials values, and all other config is provided by -the environment variables, SDK defaults, and user provided aws.Config values. - -If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable -option is used to create the Session the full shared config values will be -loaded. This includes credentials, region, and support for assume role. In -addition the Session will load its configuration from both the shared config -file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both -files have the same format. - -If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file -(~/.aws/config). - -Credentials are the values the SDK uses to authenticating requests with AWS -Services. When specified in a file, both aws_access_key_id and -aws_secret_access_key must be provided together in the same file to be -considered valid. They will be ignored if both are not present. -aws_session_token is an optional field that can be provided in addition to the -other two fields. - - aws_access_key_id = AKID - aws_secret_access_key = SECRET - aws_session_token = TOKEN - - ; region only supported if SharedConfigEnabled. - region = us-east-1 - -Assume Role configuration - -The role_arn field allows you to configure the SDK to assume an IAM role using -a set of credentials from another source. Such as when paired with static -credentials, "profile_source", "credential_process", or "credential_source" -fields. 
If "role_arn" is provided, a source of credentials must also be -specified, such as "source_profile", "credential_source", or -"credential_process". - - role_arn = arn:aws:iam:::role/ - source_profile = profile_with_creds - external_id = 1234 - mfa_serial = - role_session_name = session_name - - -The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you -must also set the Session Option.AssumeRoleTokenProvider. The Session will fail -to load if the AssumeRoleTokenProvider is not specified. - - sess := session.Must(session.NewSessionWithOptions(session.Options{ - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - })) - -To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider -documentation. - -Environment Variables - -When a Session is created several environment variables can be set to adjust -how the SDK functions, and what configuration data it loads when creating -Sessions. All environment values are optional, but some values like credentials -require multiple of the values to set or the partial values will be ignored. -All environment variable values are strings unless otherwise noted. - -Environment configuration values. If set both Access Key ID and Secret Access -Key must be provided. Session Token and optionally also be provided, but is -not required. - - # Access Key ID - AWS_ACCESS_KEY_ID=AKID - AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - - # Secret Access Key - AWS_SECRET_ACCESS_KEY=SECRET - AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - - # Session Token - AWS_SESSION_TOKEN=TOKEN - -Region value will instruct the SDK where to make service API requests to. If is -not provided in the environment the region must be provided before a service -client request is made. - - AWS_REGION=us-east-1 - - # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_REGION is not also set. - AWS_DEFAULT_REGION=us-east-1 - -Profile name the SDK should load use when loading shared config from the -configuration files. If not provided "default" will be used as the profile name. - - AWS_PROFILE=my_profile - - # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - # and AWS_PROFILE is not also set. - AWS_DEFAULT_PROFILE=my_profile - -SDK load config instructs the SDK to load the shared config in addition to -shared credentials. This also expands the configuration loaded so the shared -credentials will have parity with the shared config file. This also enables -Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE -env values as well. - - AWS_SDK_LOAD_CONFIG=1 - -Custom Shared Config and Credential Files - -Shared credentials file path can be set to instruct the SDK to use an alternative -file for the shared credentials. If not set the file will be loaded from -$HOME/.aws/credentials on Linux/Unix based systems, and -%USERPROFILE%\.aws\credentials on Windows. - - AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - -Shared config file path can be set to instruct the SDK to use an alternative -file for the shared config. If not set the file will be loaded from -$HOME/.aws/config on Linux/Unix based systems, and -%USERPROFILE%\.aws\config on Windows. - - AWS_CONFIG_FILE=$HOME/my_shared_config - -Custom CA Bundle - -Path to a custom Credentials Authority (CA) bundle PEM file that the SDK -will use instead of the default system's root CA bundle. Use this only -if you want to replace the CA bundle the SDK uses for TLS requests. 
- - AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - -Enabling this option will attempt to merge the Transport into the SDK's HTTP -client. If the client's Transport is not a http.Transport an error will be -returned. If the Transport's TLS config is set this option will cause the SDK -to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file -contains multiple certificates all of them will be loaded. - -The Session option CustomCABundle is also available when creating sessions -to also enable this feature. CustomCABundle session option field has priority -over the AWS_CA_BUNDLE environment variable, and will be used if both are set. - -Setting a custom HTTPClient in the aws.Config options will override this setting. -To use this option and custom HTTP client, the HTTP client needs to be provided -when creating the session. Not the service client. - -Custom Client TLS Certificate - -The SDK supports the environment and session option being configured with -Client TLS certificates that are sent as a part of the client's TLS handshake -for client authentication. If used, both Cert and Key values are required. If -one is missing, or either fail to load the contents of the file an error will -be returned. - -HTTP Client's Transport concrete implementation must be a http.Transport -or creating the session will fail. - - AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - -This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. - - sess, err := session.NewSessionWithOptions(session.Options{ - ClientTLSCert: myCertFile, - ClientTLSKey: myKeyFile, - }) - -Custom EC2 IMDS Endpoint - -The endpoint of the EC2 IMDS client can be configured via the environment -variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a -Session. See Options.EC2IMDSEndpoint for more details. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 - -If using an URL with an IPv6 address literal, the IPv6 address -component must be enclosed in square brackets. - - AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - -The custom EC2 IMDS endpoint can also be specified via the Session options. - - sess, err := session.NewSessionWithOptions(session.Options{ - EC2MetadataEndpoint: "http://[::1]", - }) - -FIPS and DualStack Endpoints - -The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. - -You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable -or disable FIPS endpoint resolution. - - AWS_USE_FIPS_ENDPOINT=true - -To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable -or disable FIPS endpoint resolution. 
- - [profile myprofile] - region=us-west-2 - use_fips_endpoint=true - -To configure a FIPS endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, - }) - -You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), -or programmatically. - -To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to -enable or disable DualStack endpoint resolution. - - AWS_USE_DUALSTACK_ENDPOINT=true - -To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable -or disable DualStack endpoint resolution. - - [profile myprofile] - region=us-west-2 - use_dualstack_endpoint=true - -To configure a DualStack endpoint programmatically - - // Option 1: Configure it on a session for all clients - sess, err := session.NewSessionWithOptions(session.Options{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) - if err != nil { - // handle error - } - - client := s3.New(sess) - - // Option 2: Configure it per client - sess, err := session.NewSession() - if err != nil { - // handle error - } - - client := s3.New(sess, &aws.Config{ - UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, - }) -*/ -package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go deleted file mode 100644 index d6fa24776..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ /dev/null @@ -1,471 +0,0 @@ -package session - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// EnvProviderName provides a name of the provider when config is loaded from environment. -const EnvProviderName = "EnvConfigCredentials" - -// envConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type envConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Creds credentials.Value - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-east-1 - // - // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_REGION is not also set. 
- // AWS_DEFAULT_REGION=us-east-1 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // - // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, - // # and AWS_PROFILE is not also set. - // AWS_DEFAULT_PROFILE=my_profile - Profile string - - // SDK load config instructs the SDK to load the shared config in addition to - // shared credentials. This also expands the configuration loaded from the shared - // credentials to have parity with the shared config file. This also enables - // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE - // env values as well. - // - // AWS_SDK_LOAD_CONFIG=1 - EnableSharedConfig bool - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. - // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the session. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - // Sets the TLC client certificate that should be used by the SDK's HTTP transport - // when making requests. The certificate must be paired with a TLS client key file. - // - // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - ClientTLSCert string - - // Sets the TLC client key that should be used by the SDK's HTTP transport - // when making requests. The key must be paired with a TLS client certificate file. - // - // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - ClientTLSKey string - - csmEnabled string - CSMEnabled *bool - CSMPort string - CSMHost string - CSMClientID string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery *bool - enableEndpointDiscovery string - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. 
- // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint - // for a service. - // - // AWS_STS_REGIONAL_ENDPOINTS=regional - // This can take value as `regional` or `legacy` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the S3 Regional Endpoint flag for the SDK to resolve the - // endpoint for a service. - // - // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional - // This can take value as `regional` or `legacy` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion bool - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - EC2IMDSEndpoint string - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 - EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // AWS_USE_DUALSTACK_ENDPOINT=true - UseDualStackEndpoint endpoints.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // AWS_USE_FIPS_ENDPOINT=true - UseFIPSEndpoint endpoints.FIPSEndpointState -} - -var ( - csmEnabledEnvKey = []string{ - "AWS_CSM_ENABLED", - } - csmHostEnvKey = []string{ - "AWS_CSM_HOST", - } - csmPortEnvKey = []string{ - "AWS_CSM_PORT", - } - csmClientIDEnvKey = []string{ - "AWS_CSM_CLIENT_ID", - } - credAccessEnvKey = []string{ - "AWS_ACCESS_KEY_ID", - "AWS_ACCESS_KEY", - } - credSecretEnvKey = []string{ - "AWS_SECRET_ACCESS_KEY", - "AWS_SECRET_KEY", - } - credSessionEnvKey = []string{ - "AWS_SESSION_TOKEN", - } - - enableEndpointDiscoveryEnvKey = []string{ - "AWS_ENABLE_ENDPOINT_DISCOVERY", - } - - regionEnvKeys = []string{ - "AWS_REGION", - "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - profileEnvKeys = []string{ - "AWS_PROFILE", - "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set - } - sharedCredsFileEnvKey = []string{ - "AWS_SHARED_CREDENTIALS_FILE", - } - sharedConfigFileEnvKey = []string{ - "AWS_CONFIG_FILE", - } - webIdentityTokenFilePathEnvKey = []string{ - "AWS_WEB_IDENTITY_TOKEN_FILE", - } - roleARNEnvKey = []string{ - "AWS_ROLE_ARN", - } - roleSessionNameEnvKey = []string{ - "AWS_ROLE_SESSION_NAME", - } - stsRegionalEndpointKey = []string{ - "AWS_STS_REGIONAL_ENDPOINTS", - } - s3UsEast1RegionalEndpoint = []string{ - "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", - } - s3UseARNRegionEnvKey = []string{ - "AWS_S3_USE_ARN_REGION", - } - ec2IMDSEndpointEnvKey = []string{ - "AWS_EC2_METADATA_SERVICE_ENDPOINT", - } - ec2IMDSEndpointModeEnvKey = []string{ - "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", - } - useCABundleKey = []string{ - "AWS_CA_BUNDLE", - } - useClientTLSCert = []string{ - "AWS_SDK_GO_CLIENT_TLS_CERT", - } - useClientTLSKey = []string{ - "AWS_SDK_GO_CLIENT_TLS_KEY", - } - awsUseDualStackEndpoint = []string{ - "AWS_USE_DUALSTACK_ENDPOINT", - } - awsUseFIPSEndpoint = []string{ - "AWS_USE_FIPS_ENDPOINT", - } -) - -// loadEnvConfig retrieves the SDK's environment configuration. -// See `envConfig` for the values that will be retrieved. 
-// -// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value -// the shared SDK config will be loaded in addition to the SDK's specific -// configuration values. -func loadEnvConfig() (envConfig, error) { - enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) - return envConfigLoad(enableSharedConfig) -} - -// loadEnvSharedConfig retrieves the SDK's environment configuration, and the -// SDK shared config. See `envConfig` for the values that will be retrieved. -// -// Loads the shared configuration in addition to the SDK's specific configuration. -// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` -// environment variable is set. -func loadSharedEnvConfig() (envConfig, error) { - return envConfigLoad(true) -} - -func envConfigLoad(enableSharedConfig bool) (envConfig, error) { - cfg := envConfig{} - - cfg.EnableSharedConfig = enableSharedConfig - - // Static environment credentials - var creds credentials.Value - setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&creds.SessionToken, credSessionEnvKey) - if creds.HasKeys() { - // Require logical grouping of credentials - creds.ProviderName = EnvProviderName - cfg.Creds = creds - } - - // Role Metadata - setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) - setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) - - // Web identity environment variables - setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) - - // CSM environment variables - setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) - setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) - setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) - setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - - if len(cfg.csmEnabled) != 0 { - v, _ := strconv.ParseBool(cfg.csmEnabled) - cfg.CSMEnabled = &v - } - - regionKeys := regionEnvKeys - profileKeys := profileEnvKeys - if !cfg.EnableSharedConfig { - regionKeys = regionKeys[:1] - profileKeys = profileKeys[:1] - } - - setFromEnvVal(&cfg.Region, regionKeys) - setFromEnvVal(&cfg.Profile, profileKeys) - - // endpoint discovery is in reference to it being enabled. 
- setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) - if len(cfg.enableEndpointDiscovery) > 0 { - cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") - } - - setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) - setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) - - if len(cfg.SharedCredentialsFile) == 0 { - cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(cfg.SharedConfigFile) == 0 { - cfg.SharedConfigFile = defaults.SharedConfigFilename() - } - - setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) - setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) - setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) - - var err error - // STS Regional Endpoint variable - for _, k := range stsRegionalEndpointKey { - if v := os.Getenv(k); len(v) != 0 { - cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - // S3 Regional Endpoint variable - for _, k := range s3UsEast1RegionalEndpoint { - if v := os.Getenv(k); len(v) != 0 { - cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) - } - } - } - - var s3UseARNRegion string - setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) - if len(s3UseARNRegion) != 0 { - switch { - case strings.EqualFold(s3UseARNRegion, "false"): - cfg.S3UseARNRegion = false - case strings.EqualFold(s3UseARNRegion, "true"): - cfg.S3UseARNRegion = true - default: - return envConfig{}, fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - s3UseARNRegionEnvKey[0], s3UseARNRegion) - } - } - - setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) - if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { - return envConfig{}, err - } - - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { - return cfg, err - } - - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { - return cfg, err - } - - return cfg, nil -} - -func setFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) != 0 { - *dst = v - break - } - } -} - -func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - if err := mode.SetFromString(value); err != nil { - return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) - } - return nil - } - return nil -} - -func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = endpoints.DualStackEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = endpoints.DualStackEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case 
strings.EqualFold(value, "true"): - *dst = endpoints.FIPSEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = endpoints.FIPSEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index ebace4bb7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,992 +0,0 @@ -package session - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/csm" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" -) - -const ( - // ErrCodeSharedConfig represents an error that occurs in the shared - // configuration logic - ErrCodeSharedConfig = "SharedConfigErr" - - // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. - ErrCodeLoadCustomCABundle = "LoadCustomCABundleError" - - // ErrCodeLoadClientTLSCert error code for unable to load client TLS - // certificate or key - ErrCodeLoadClientTLSCert = "LoadClientTLSCertError" -) - -// ErrSharedConfigSourceCollision will be returned if a section contains both -// source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) - -// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment -// variables are empty and Environment was set as the credential source -var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) - -// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided -var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the Session concurrently. -// -// The Session satisfies the service client's client.ConfigProvider. -type Session struct { - Config *aws.Config - Handlers request.Handlers - - options Options -} - -// New creates a new instance of the handlers merging in the provided configs -// on top of the SDK's default configurations. Once the Session is created it -// can be mutated to modify the Config or Handlers. The Session is safe to be -// read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New -// method could now encounter an error when loading the configuration. 
When -// The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occurred while -// loading the session. Use NewSession to get the error when creating the -// session. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. -// -// Deprecated: Use NewSession functions to create sessions instead. NewSession -// has the same functionality as New except an error can be returned when the -// func is called instead of waiting to receive an error until a request is made. -func New(cfgs ...*aws.Config) *Session { - // load initial config from environment - envCfg, envErr := loadEnvConfig() - - if envCfg.EnableSharedConfig { - var cfg aws.Config - cfg.MergeIn(cfgs...) - s, err := NewSessionWithOptions(Options{ - Config: cfg, - SharedConfigState: SharedConfigEnable, - }) - if err != nil { - // Old session.New expected all errors to be discovered when - // a request is made, and would report the errors then. This - // needs to be replicated if an error occurs while creating - // the session. - msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occurring during session creation." - - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s = &Session{Config: defaults.Config()} - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - - return s - } - - s := deprecatedNewSession(envCfg, cfgs...) - if envErr != nil { - msg := "failed to load env config" - s.logDeprecatedNewSessionError(msg, envErr, cfgs) - } - - if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - msg := "failed to enable CSM" - s.logDeprecatedNewSessionError(msg, err, cfgs) - } - } - - return s -} - -// NewSession returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. Once the Session is created -// it can be mutated to modify the Config or Handlers. The Session is safe to -// be read concurrently, but it should not be written to concurrently. -// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created, such as specifying the -// config profile, and controlling if shared config is enabled or not. -func NewSession(cfgs ...*aws.Config) (*Session, error) { - opts := Options{} - opts.Config.MergeIn(cfgs...) 
- - return NewSessionWithOptions(opts) -} - -// SharedConfigState provides the ability to optionally override the state -// of the session's creation based on the shared config being enabled or -// disabled. -type SharedConfigState int - -const ( - // SharedConfigStateFromEnv does not override any state of the - // AWS_SDK_LOAD_CONFIG env var. It is the default value of the - // SharedConfigState type. - SharedConfigStateFromEnv SharedConfigState = iota - - // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value - // and disables the shared config functionality. - SharedConfigDisable - - // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value - // and enables the shared config functionality. - SharedConfigEnable -) - -// Options provides the means to control how a Session is created and what -// configuration values will be loaded. -// -type Options struct { - // Provides config values for the SDK to use when creating service clients - // and making API requests to services. Any value set in with this field - // will override the associated value provided by the SDK defaults, - // environment or config files where relevant. - // - // If not set, configuration values from from SDK defaults, environment, - // config will be used. - Config aws.Config - - // Overrides the config profile the Session should be created from. If not - // set the value of the environment variable will be loaded (AWS_PROFILE, - // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). - // - // If not set and environment variables are not set the "default" - // (DefaultSharedConfigProfile) will be used as the profile to load the - // session config from. - Profile string - - // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG - // environment variable. By default a Session will be created using the - // value provided by the AWS_SDK_LOAD_CONFIG environment variable. - // - // Setting this value to SharedConfigEnable or SharedConfigDisable - // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable - // and enable or disable the shared config functionality. - SharedConfigState SharedConfigState - - // Ordered list of files the session will load configuration from. - // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. - SharedConfigFiles []string - - // When the SDK's shared config is configured to assume a role with MFA - // this option is required in order to provide the mechanism that will - // retrieve the MFA token. There is no default value for this field. If - // it is not set an error will be returned when creating the session. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed. Within the context of service clients - // all sharing the same session the SDK will ensure calls to the token - // provider are atomic. When sharing a token provider across multiple - // sessions additional synchronization logic is needed to ensure the - // token providers do not introduce race conditions. It is recommend to - // share the session where possible. - // - // stscreds.StdinTokenProvider is a basic implementation that will prompt - // from stdin for the MFA token code. - // - // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. 
- AssumeRoleTokenProvider func() (string, error) - - // When the SDK's shared config is configured to assume a role this option - // may be provided to set the expiry duration of the STS credentials. - // Defaults to 15 minutes if not set as documented in the - // stscreds.AssumeRoleProvider. - AssumeRoleDuration time.Duration - - // Reader for a custom Credentials Authority (CA) bundle in PEM format that - // the SDK will use instead of the default system's root CA bundle. Use this - // only if you want to replace the CA bundle the SDK uses for TLS requests. - // - // HTTP Client's Transport concrete implementation must be a http.Transport - // or creating the session will fail. - // - // If the Transport's TLS config is set this option will cause the SDK - // to overwrite the Transport's TLS config's RootCAs value. If the CA - // bundle reader contains multiple certificates all of them will be loaded. - // - // Can also be specified via the environment variable: - // - // AWS_CA_BUNDLE=$HOME/ca_bundle - // - // Can also be specified via the shared config field: - // - // ca_bundle = $HOME/ca_bundle - CustomCABundle io.Reader - - // Reader for the TLC client certificate that should be used by the SDK's - // HTTP transport when making requests. The certificate must be paired with - // a TLS client key file. Will be ignored if both are not provided. - // - // HTTP Client's Transport concrete implementation must be a http.Transport - // or creating the session will fail. - // - // Can also be specified via the environment variable: - // - // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert - ClientTLSCert io.Reader - - // Reader for the TLC client key that should be used by the SDK's HTTP - // transport when making requests. The key must be paired with a TLS client - // certificate file. Will be ignored if both are not provided. - // - // HTTP Client's Transport concrete implementation must be a http.Transport - // or creating the session will fail. - // - // Can also be specified via the environment variable: - // - // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key - ClientTLSKey io.Reader - - // The handlers that the session and all API clients will be created with. - // This must be a complete set of handlers. Use the defaults.Handlers() - // function to initialize this value before changing the handlers to be - // used by the SDK. - Handlers request.Handlers - - // Allows specifying a custom endpoint to be used by the EC2 IMDS client - // when making requests to the EC2 IMDS API. The endpoint value should - // include the URI scheme. If the scheme is not present it will be defaulted to http. - // - // If unset, will the EC2 IMDS client will use its default endpoint. - // - // Can also be specified via the environment variable, - // AWS_EC2_METADATA_SERVICE_ENDPOINT. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 - // - // If using an URL with an IPv6 address literal, the IPv6 address - // component must be enclosed in square brackets. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - EC2IMDSEndpoint string - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 - EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState -} - -// NewSessionWithOptions returns a new Session created from SDK defaults, config files, -// environment, and user provided config files. This func uses the Options -// values to configure how the Session is created. 
-// -// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value -// the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/credentials). Values set in both the -// shared config, and shared credentials will be taken from the shared -// credentials file. Enabling the Shared Config will also allow the Session -// to be built with retrieving credentials with AssumeRole set in the config. -// -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) -// -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) -// -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) -// -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) -func NewSessionWithOptions(opts Options) (*Session, error) { - var envCfg envConfig - var err error - if opts.SharedConfigState == SharedConfigEnable { - envCfg, err = loadSharedEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load shared config, %v", err) - } - } else { - envCfg, err = loadEnvConfig() - if err != nil { - return nil, fmt.Errorf("failed to load environment config, %v", err) - } - } - - if len(opts.Profile) != 0 { - envCfg.Profile = opts.Profile - } - - switch opts.SharedConfigState { - case SharedConfigDisable: - envCfg.EnableSharedConfig = false - case SharedConfigEnable: - envCfg.EnableSharedConfig = true - } - - return newSession(opts, envCfg, &opts.Config) -} - -// Must is a helper function to ensure the Session is valid and there was no -// error when calling a NewSession function. -// -// This helper is intended to be used in variable initialization to load the -// Session and configuration at startup. Such as: -// -// var sess = session.Must(session.NewSession()) -func Must(sess *Session, err error) *Session { - if err != nil { - panic(err) - } - - return sess -} - -// Wraps the endpoint resolver with a resolver that will return a custom -// endpoint for EC2 IMDS. -func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { - return endpoints.ResolverFunc( - func(service, region string, opts ...func(*endpoints.Options)) ( - endpoints.ResolvedEndpoint, error, - ) { - if service == ec2MetadataServiceID && len(endpoint) > 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoint, - SigningName: ec2MetadataServiceID, - SigningRegion: region, - }, nil - } else if service == ec2MetadataServiceID { - opts = append(opts, func(o *endpoints.Options) { - o.EC2MetadataEndpointMode = mode - }) - } - return resolver.EndpointFor(service, region, opts...) - }) -} - -func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) - if cfg.EndpointResolver == nil { - // An endpoint resolver is required for a session to be able to provide - // endpoints for service client configurations. 
- cfg.EndpointResolver = endpoints.DefaultResolver() - } - - if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) { - cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode) - } - - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - options: Options{ - EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint, - }, - } - - initHandlers(s) - return s -} - -func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { - if logger != nil { - logger.Log("Enabling CSM") - } - - r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) - if err != nil { - return err - } - r.InjectHandlers(handlers) - - return nil -} - -func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { - cfg := defaults.Config() - - handlers := opts.Handlers - if handlers.IsEmpty() { - handlers = defaults.Handlers() - } - - // Get a merged version of the user provided config to determine if - // credentials were. - userCfg := &aws.Config{} - userCfg.MergeIn(cfgs...) - cfg.MergeIn(userCfg) - - // Ordered config files will be loaded in with later files overwriting - // previous config file values. - var cfgFiles []string - if opts.SharedConfigFiles != nil { - cfgFiles = opts.SharedConfigFiles - } else { - cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] - } - } - - // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) - if err != nil { - if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { - // Special case where the user has not explicitly specified an AWS_PROFILE, - // or session.Options.profile, shared config is not enabled, and the - // environment has credentials, allow the shared config file to fail to - // load since the user has already provided credentials, and nothing else - // is required to be read file. 
Github(aws/aws-sdk-go#2455) - } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return nil, err - } - } - - if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { - return nil, err - } - - if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil { - return nil, err - } - - s := &Session{ - Config: cfg, - Handlers: handlers, - options: opts, - } - - initHandlers(s) - - if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { - if l := s.Config.Logger; l != nil { - l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) - } - } else if csmCfg.Enabled { - err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) - if err != nil { - return nil, err - } - } - - return s, nil -} - -type csmConfig struct { - Enabled bool - Host string - Port string - ClientID string -} - -var csmProfileName = "aws_csm" - -func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { - if envCfg.CSMEnabled != nil { - if *envCfg.CSMEnabled { - return csmConfig{ - Enabled: true, - ClientID: envCfg.CSMClientID, - Host: envCfg.CSMHost, - Port: envCfg.CSMPort, - }, nil - } - return csmConfig{}, nil - } - - sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { - return csmConfig{}, err - } - } - if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { - return csmConfig{ - Enabled: true, - ClientID: sharedCfg.CSMClientID, - Host: sharedCfg.CSMHost, - Port: sharedCfg.CSMPort, - }, nil - } - - return csmConfig{}, nil -} - -func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { - // CA Bundle can be specified in both environment variable shared config file. - var caBundleFilename = envCfg.CustomCABundle - if len(caBundleFilename) == 0 { - caBundleFilename = sharedCfg.CustomCABundle - } - - // Only use environment value if session option is not provided. - customTLSOptions := map[string]struct { - filename string - field *io.Reader - errCode string - }{ - "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle}, - "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert}, - "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert}, - } - for name, v := range customTLSOptions { - if len(v.filename) != 0 && *v.field == nil { - f, err := os.Open(v.filename) - if err != nil { - return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err) - } - defer f.Close() - *v.field = f - } - } - - // Setup HTTP client with custom cert bundle if enabled - if opts.CustomCABundle != nil { - if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil { - return err - } - } - - // Setup HTTP client TLS certificate and key for client TLS authentication. - if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil { - if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil { - return err - } - } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil { - // Do nothing if neither values are available. 
- - } else { - return awserr.New(ErrCodeLoadClientTLSCert, - fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided", - opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil) - } - - return nil -} - -func getHTTPTransport(client *http.Client) (*http.Transport, error) { - var t *http.Transport - switch v := client.Transport.(type) { - case *http.Transport: - t = v - default: - if client.Transport != nil { - return nil, fmt.Errorf("unsupported transport, %T", client.Transport) - } - } - if t == nil { - // Nil transport implies `http.DefaultTransport` should be used. Since - // the SDK cannot modify, nor copy the `DefaultTransport` specifying - // the values the next closest behavior. - t = getCustomTransport() - } - - return t, nil -} - -func loadCustomCABundle(client *http.Client, bundle io.Reader) error { - t, err := getHTTPTransport(client) - if err != nil { - return awserr.New(ErrCodeLoadCustomCABundle, - "unable to load custom CA bundle, HTTPClient's transport unsupported type", err) - } - - p, err := loadCertPool(bundle) - if err != nil { - return err - } - if t.TLSClientConfig == nil { - t.TLSClientConfig = &tls.Config{} - } - t.TLSClientConfig.RootCAs = p - - client.Transport = t - - return nil -} - -func loadCertPool(r io.Reader) (*x509.CertPool, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeLoadCustomCABundle, - "failed to read custom CA bundle PEM file", err) - } - - p := x509.NewCertPool() - if !p.AppendCertsFromPEM(b) { - return nil, awserr.New(ErrCodeLoadCustomCABundle, - "failed to load custom CA bundle PEM file", err) - } - - return p, nil -} - -func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error { - t, err := getHTTPTransport(client) - if err != nil { - return awserr.New(ErrCodeLoadClientTLSCert, - "unable to get usable HTTP transport from client", err) - } - - cert, err := ioutil.ReadAll(certFile) - if err != nil { - return awserr.New(ErrCodeLoadClientTLSCert, - "unable to get read client TLS cert file", err) - } - - key, err := ioutil.ReadAll(keyFile) - if err != nil { - return awserr.New(ErrCodeLoadClientTLSCert, - "unable to get read client TLS key file", err) - } - - clientCert, err := tls.X509KeyPair(cert, key) - if err != nil { - return awserr.New(ErrCodeLoadClientTLSCert, - "unable to load x509 key pair from client cert", err) - } - - tlsCfg := t.TLSClientConfig - if tlsCfg == nil { - tlsCfg = &tls.Config{} - } - - tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert) - - t.TLSClientConfig = tlsCfg - client.Transport = t - - return nil -} - -func mergeConfigSrcs(cfg, userCfg *aws.Config, - envCfg envConfig, sharedCfg sharedConfig, - handlers request.Handlers, - sessOpts Options, -) error { - - // Region if not already set by user - if len(aws.StringValue(cfg.Region)) == 0 { - if len(envCfg.Region) > 0 { - cfg.WithRegion(envCfg.Region) - } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { - cfg.WithRegion(sharedCfg.Region) - } - } - - if cfg.EnableEndpointDiscovery == nil { - if envCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) - } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { - cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) - } - } - - // Regional Endpoint flag for STS endpoint resolving - mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ - userCfg.STSRegionalEndpoint, - envCfg.STSRegionalEndpoint, - sharedCfg.STSRegionalEndpoint, - 
endpoints.LegacySTSEndpoint, - }) - - // Regional Endpoint flag for S3 endpoint resolving - mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ - userCfg.S3UsEast1RegionalEndpoint, - envCfg.S3UsEast1RegionalEndpoint, - sharedCfg.S3UsEast1RegionalEndpoint, - endpoints.LegacyS3UsEast1Endpoint, - }) - - var ec2IMDSEndpoint string - for _, v := range []string{ - sessOpts.EC2IMDSEndpoint, - envCfg.EC2IMDSEndpoint, - sharedCfg.EC2IMDSEndpoint, - } { - if len(v) != 0 { - ec2IMDSEndpoint = v - break - } - } - - var endpointMode endpoints.EC2IMDSEndpointModeState - for _, v := range []endpoints.EC2IMDSEndpointModeState{ - sessOpts.EC2IMDSEndpointMode, - envCfg.EC2IMDSEndpointMode, - sharedCfg.EC2IMDSEndpointMode, - } { - if v != endpoints.EC2IMDSEndpointModeStateUnset { - endpointMode = v - break - } - } - - if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { - cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) - } - - // Configure credentials if not already set by the user when creating the - // Session. - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds - } - - cfg.S3UseARNRegion = userCfg.S3UseARNRegion - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &envCfg.S3UseARNRegion - } - if cfg.S3UseARNRegion == nil { - cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion - } - - for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { - if v != endpoints.DualStackEndpointStateUnset { - cfg.UseDualStackEndpoint = v - break - } - } - - for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { - if v != endpoints.FIPSEndpointStateUnset { - cfg.UseFIPSEndpoint = v - break - } - } - - return nil -} - -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = v - break - } - } -} - -func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { - for _, v := range values { - if v != endpoints.UnsetS3UsEast1Endpoint { - cfg.S3UsEast1RegionalEndpoint = v - break - } - } -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current Session, copying the config -// and handlers. If any additional configs are provided they will be merged -// on top of the Session's copied config. -// -// // Create a copy of the current Session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - options: s.options, - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. 
Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) - - resolvedRegion := normalizeRegion(s.Config) - - region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) - if err != nil { - s.Handlers.Validate.PushBack(func(r *request.Request) { - if len(r.ClientInfo.Endpoint) != 0 { - // Error occurred while resolving endpoint, but the request - // being invoked has had an endpoint specified after the client - // was created. - return - } - r.Error = err - }) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - PartitionID: resolved.PartitionID, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - ResolvedRegion: resolvedRegion, - } -} - -const ec2MetadataServiceID = "ec2metadata" - -func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { - - if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { - return endpoints.ResolvedEndpoint{ - URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), - SigningRegion: region, - }, nil - } - - resolved, err := cfg.EndpointResolver.EndpointFor(service, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) - - opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) - opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint - - opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint - - // Support for STSRegionalEndpoint where the STSRegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint - - // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is - // provided in envConfig or sharedConfig with envConfig getting - // precedence. - opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - - opt.ResolvedRegion = resolvedRegion - - opt.Logger = cfg.Logger - opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) - }, - ) - if err != nil { - return endpoints.ResolvedEndpoint{}, err - } - - return resolved, nil -} - -// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception -// that the EndpointResolver will not be used to resolve the endpoint. The only -// endpoint set must come from the aws.Config.Endpoint field. -func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- - resolvedRegion := normalizeRegion(s.Config) - - var resolved endpoints.ResolvedEndpoint - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { - resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = aws.StringValue(s.Config.Region) - } - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningNameDerived: resolved.SigningNameDerived, - SigningName: resolved.SigningName, - ResolvedRegion: resolvedRegion, - } -} - -// logDeprecatedNewSessionError function enables error handling for session -func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { - // Session creation failed, need to report the error and prevent - // any requests from succeeding. - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) -} - -// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided -// config to have the equivalent options for resolution and returns the resolved region name. -func normalizeRegion(cfg *aws.Config) (resolved string) { - const fipsInfix = "-fips-" - const fipsPrefix = "-fips" - const fipsSuffix = "fips-" - - region := aws.StringValue(cfg.Region) - - if strings.Contains(region, fipsInfix) || - strings.Contains(region, fipsPrefix) || - strings.Contains(region, fipsSuffix) { - resolved = strings.Replace(strings.Replace(strings.Replace( - region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1) - cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled - } - - return resolved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go deleted file mode 100644 index 424c82b4d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ /dev/null @@ -1,729 +0,0 @@ -package session - -import ( - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/internal/ini" -) - -const ( - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // AWS Single Sign-On (AWS SSO) group - ssoAccountIDKey = "sso_account_id" - ssoRegionKey = "sso_region" - ssoRoleNameKey = "sso_role_name" - ssoStartURL = "sso_start_url" - - // CSM options - csmEnabledKey = `csm_enabled` - csmHostKey = `csm_host` - csmPortKey = `csm_port` - csmClientIDKey = `csm_client_id` - - // Additional Config fields - regionKey = `region` - - // custom CA Bundle filename - customCABundleKey = `ca_bundle` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential Process - credentialProcessKey = `credential_process` 
// optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // Additional config fields for regional or legacy endpoints - stsRegionalEndpointSharedKey = `sts_regional_endpoints` - - // Additional config fields for regional or legacy endpoints - s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. - DefaultSharedConfigProfile = `default` - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" - - // EC2 IMDS Endpoint Mode - ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" - - // EC2 IMDS Endpoint - ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" - - // Use DualStack Endpoint Resolution - useDualStackEndpoint = "use_dualstack_endpoint" - - // Use FIPS Endpoint Resolution - useFIPSEndpointKey = "use_fips_endpoint" -) - -// sharedConfig represents the configuration fields of the SDK config files. -type sharedConfig struct { - Profile string - - // Credentials values from the config file. Both aws_access_key_id and - // aws_secret_access_key must be provided together in the same file to be - // considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of - // the other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Creds credentials.Value - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - SSOAccountID string - SSORegion string - SSORoleName string - SSOStartURL string - - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string - AssumeRoleDuration *time.Duration - - SourceProfileName string - SourceProfile *sharedConfig - - // Region is the region the SDK should use for looking up AWS service - // endpoints and signing requests. - // - // region - Region string - - // CustomCABundle is the file path to a PEM file the SDK will read and - // use to configure the HTTP transport with additional CA certs that are - // not present in the platforms default CA store. - // - // This value will be ignored if the file does not exist. - // - // ca_bundle - CustomCABundle string - - // EnableEndpointDiscovery can be enabled in the shared config by setting - // endpoint_discovery_enabled to true - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery *bool - - // CSM Options - CSMEnabled *bool - CSMHost string - CSMPort string - CSMClientID string - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // sts_regional_endpoints = regional - // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` - STSRegionalEndpoint endpoints.STSRegionalEndpoint - - // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service - // - // s3_us_east_1_regional_endpoint = regional - // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` - S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. 
- // - // s3_use_arn_region=true - S3UseARNRegion bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // ec2_metadata_service_endpoint_mode=IPv6 - EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // ec2_metadata_service_endpoint=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // use_dualstack_endpoint=true - UseDualStackEndpoint endpoints.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // use_fips_endpoint=true - UseFIPSEndpoint endpoints.FIPSEndpointState -} - -type sharedConfigFile struct { - Filename string - IniData ini.Sections -} - -// loadSharedConfig retrieves the configuration from the list of files using -// the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of -// A's. -// -// See sharedConfig.setFromFile for information how the config files -// will be loaded. -func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { - if len(profile) == 0 { - profile = DefaultSharedConfigProfile - } - - files, err := loadSharedConfigIniFiles(filenames) - if err != nil { - return sharedConfig{}, err - } - - cfg := sharedConfig{} - profiles := map[string]struct{}{} - if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { - return sharedConfig{}, err - } - - return cfg, nil -} - -func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { - files := make([]sharedConfigFile, 0, len(filenames)) - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { - // Skip files which can't be opened and read for whatever reason - continue - } else if err != nil { - return nil, SharedConfigLoadError{Filename: filename, Err: err} - } - - files = append(files, sharedConfigFile{ - Filename: filename, IniData: sections, - }) - } - - return files, nil -} - -func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { - cfg.Profile = profile - - // Trim files from the list that don't exist. - var skippedFiles int - var profileNotFoundErr error - for _, f := range files { - if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore profiles not defined in individual files. - profileNotFoundErr = err - skippedFiles++ - continue - } - return err - } - } - if skippedFiles == len(files) { - // If all files were skipped because the profile is not found, return - // the original profile not found error. - return profileNotFoundErr - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. 
- cfg.clearAssumeRoleOptions() - } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. - if err := cfg.validateCredentialsConfig(profile); err != nil { - return err - } - } - profiles[profile] = struct{}{} - - if err := cfg.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(cfg.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - cfg.clearCredentialOptions() - - srcCfg := &sharedConfig{} - err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) - if err != nil { - // SourceProfile that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistsError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: cfg.RoleARN, - SourceProfile: cfg.SourceProfileName, - } - } - - cfg.SourceProfile = srcCfg - } - - return nil -} - -// setFromFile loads the configuration from the file using the profile -// provided. A sharedConfig pointer type value is used so that multiple config -// file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For -// example if a config file only includes aws_access_key_id but no -// aws_secret_access_key the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { - section, ok := file.IniData.GetSection(profile) - if !ok { - // Fallback to to alternate profile name: profile - section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if !ok { - return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} - } - } - - if exOpts { - // Assume Role Parameters - updateString(&cfg.RoleARN, section, roleArnKey) - updateString(&cfg.ExternalID, section, externalIDKey) - updateString(&cfg.MFASerial, section, mfaSerialKey) - updateString(&cfg.RoleSessionName, section, roleSessionNameKey) - updateString(&cfg.SourceProfileName, section, sourceProfileKey) - updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) - updateString(&cfg.CustomCABundle, section, customCABundleKey) - - if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second - cfg.AssumeRoleDuration = &d - } - - if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { - sre, err := endpoints.GetSTSRegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - stsRegionalEndpointSharedKey, file.Filename, err) - } - cfg.STSRegionalEndpoint = sre - } - - if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { - sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - s3UsEast1RegionalSharedKey, file.Filename, err) - } - cfg.S3UsEast1RegionalEndpoint = sre - } - - // AWS Single Sign-On (AWS SSO) - updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) - updateString(&cfg.SSORegion, 
section, ssoRegionKey) - updateString(&cfg.SSORoleName, section, ssoRoleNameKey) - updateString(&cfg.SSOStartURL, section, ssoStartURL) - - if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %s, %v", - ec2MetadataServiceEndpointModeKey, file.Filename, err) - } - updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) - - updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) - - updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) - } - - updateString(&cfg.CredentialProcess, section, credentialProcessKey) - updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - // Shared Credentials - creds := credentials.Value{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), - } - if creds.HasKeys() { - cfg.Creds = creds - } - - // Endpoint discovery - updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - - // CSM options - updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) - updateString(&cfg.CSMHost, section, csmHostKey) - updateString(&cfg.CSMPort, section, csmPortKey) - updateString(&cfg.CSMClientID, section, csmClientIDKey) - - updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) - - return nil -} - -func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - return endpointMode.SetFromString(value) -} - -func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { - if err := cfg.validateCredentialsRequireARN(profile); err != nil { - return err - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(cfg.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(cfg.CredentialSource) != 0: - credSource = credentialSourceKey - case len(cfg.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(cfg.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (cfg *sharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. 
- if !oneOrNone( - len(cfg.SourceProfileName) != 0, - len(cfg.CredentialSource) != 0, - len(cfg.CredentialProcess) != 0, - len(cfg.WebIdentityTokenFile) != 0, - ) { - return ErrSharedConfigSourceCollision - } - - return nil -} - -func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { - return nil - } - - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) - } - - return nil -} - -func (cfg *sharedConfig) hasCredentials() bool { - switch { - case len(cfg.SourceProfileName) != 0: - case len(cfg.CredentialSource) != 0: - case len(cfg.CredentialProcess) != 0: - case len(cfg.WebIdentityTokenFile) != 0: - case cfg.hasSSOConfiguration(): - case cfg.Creds.HasKeys(): - default: - return false - } - - return true -} - -func (cfg *sharedConfig) clearCredentialOptions() { - cfg.CredentialSource = "" - cfg.CredentialProcess = "" - cfg.WebIdentityTokenFile = "" - cfg.Creds = credentials.Value{} - cfg.SSOAccountID = "" - cfg.SSORegion = "" - cfg.SSORoleName = "" - cfg.SSOStartURL = "" -} - -func (cfg *sharedConfig) clearAssumeRoleOptions() { - cfg.RoleARN = "" - cfg.ExternalID = "" - cfg.MFASerial = "" - cfg.RoleSessionName = "" - cfg.SourceProfileName = "" -} - -func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false - } - return true -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.Bool(key) -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = new(bool) - **dst = section.Bool(key) -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigLoadError) Code() string { - return "SharedConfigLoadError" -} - -// Message is the description of the error -func (e SharedConfigLoadError) Message() string { - return fmt.Sprintf("failed to load config file, %s", e.Filename) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigLoadError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. 
-func (e SharedConfigLoadError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigProfileNotExistsError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistsError struct { - Profile string - Err error -} - -// Code is the short id of the error. -func (e SharedConfigProfileNotExistsError) Code() string { - return "SharedConfigProfileNotExistsError" -} - -// Message is the description of the error -func (e SharedConfigProfileNotExistsError) Message() string { - return fmt.Sprintf("failed to get profile, %s", e.Profile) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigProfileNotExistsError) OrigErr() error { - return e.Err -} - -// Error satisfies the error interface. -func (e SharedConfigProfileNotExistsError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", e.Err) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - RoleARN string - SourceProfile string -} - -// Code is the short id of the error. -func (e SharedConfigAssumeRoleError) Code() string { - return "SharedConfigAssumeRoleError" -} - -// Message is the description of the error -func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf( - "failed to load assume role for %s, source profile %s has no shared credentials", - e.RoleARN, e.SourceProfile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e SharedConfigAssumeRoleError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Code is the short id of the error. -func (e CredentialRequiresARNError) Code() string { - return "CredentialRequiresARNError" -} - -// Message is the description of the error -func (e CredentialRequiresARNError) Message() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -// OrigErr is the underlying error that caused the failure. -func (e CredentialRequiresARNError) OrigErr() error { - return nil -} - -// Error satisfies the error interface. -func (e CredentialRequiresARNError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - if section.Bool(key) { - *dst = endpoints.DualStackEndpointStateEnabled - } else { - *dst = endpoints.DualStackEndpointStateDisabled - } - - return -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. 
-func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - if section.Bool(key) { - *dst = endpoints.FIPSEndpointStateEnabled - } else { - *dst = endpoints.FIPSEndpointStateDisabled - } - - return -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 993753831..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,81 +0,0 @@ -package v4 - -import ( - "github.com/aws/aws-sdk-go/internal/strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// allowList is a generic rule for allow listing -type allowList struct { - rule -} - -// IsValid for allow list checks if the value is within the allow list -func (w allowList) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// excludeList is a generic rule for exclude listing -type excludeList struct { - rule -} - -// IsValid for exclude list checks if the value is within the exclude list -func (b excludeList) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go deleted file mode 100644 index 6aa2ed241..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go +++ /dev/null @@ -1,7 +0,0 @@ -package v4 - -// WithUnsignedPayload will enable and set the UnsignedPayload field to -// true of the signer. 
-func WithUnsignedPayload(v4 *Signer) { - v4.UnsignedPayload = true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go deleted file mode 100644 index cf672b6ac..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return aws.BackgroundContext() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go deleted file mode 100644 index 21fe74e6f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package v4 - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws" -) - -func requestContext(r *http.Request) aws.Context { - return r.Context() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go deleted file mode 100644 index 02cbd97e2..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go +++ /dev/null @@ -1,63 +0,0 @@ -package v4 - -import ( - "encoding/hex" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -type credentialValueProvider interface { - Get() (credentials.Value, error) -} - -// StreamSigner implements signing of event stream encoded payloads -type StreamSigner struct { - region string - service string - - credentials credentialValueProvider - - prevSig []byte -} - -// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages -func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { - return &StreamSigner{ - region: region, - service: service, - credentials: credentials, - prevSig: seedSignature, - } -} - -// GetSignature takes an event stream encoded headers and payload and returns a signature -func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { - credValue, err := s.credentials.Get() - if err != nil { - return nil, err - } - - sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) - - keyPath := buildSigningScope(s.region, s.service, date) - - stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) - - signature := hmacSHA256(sigKey, []byte(stringToSign)) - s.prevSig = signature - - return signature, nil -} - -func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - formatTime(date), - scope, - hex.EncodeToString(prevSig), - hex.EncodeToString(hashSHA256(headers)), - hex.EncodeToString(hashSHA256(payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go deleted file mode 100644 index 7711ec737..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build go1.5 -// +build go1.5 - -package v4 - -import ( - "net/url" - "strings" -) - -func getURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + 
strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go deleted file mode 100644 index 4d78162c0..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ /dev/null @@ -1,854 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. -// -// Standalone Signer -// -// Generally using the signer outside of the SDK should not require any additional -// logic when using Go v1.5 or higher. The signer does this by taking advantage -// of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. -// -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. If you're using Go v1.4 you must set -// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with -// Go v1.5 the signer will fallback to URL.Path. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath -// -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. -// -// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 -// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the -// request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP -// message. URL.Opaque generally will force Go to make requests with absolute URL. -// URL.RawPath does not do this, but RawPath must be a valid escaping of Path -// or url.EscapedPath will ignore the RawPath escaping. -// -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
-package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authorizationHeader = "Authorization" - authHeaderSignatureElem = "Signature=" - signatureQueryKey = "X-Amz-Signature" - - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - awsV4Request = "aws4_request" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - excludeList{ - mapRule{ - authorizationHeader: struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a allow list for build canonical headers. -var requiredSignedHeaders = rules{ - allowList{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Tagging": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, - patterns{"X-Amz-Object-Lock-"}, -} - -// allowedHoisting is a allow list for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - excludeList{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. 
See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. - Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // Disables the automatical setting of the HTTP request's Body field with the - // io.ReadSeeker passed in to the signer. This is useful if you're using a - // custom wrapper around the body for the io.ReadSeeker and want to preserve - // the Body value on the Request.Body. - // - // This does run the risk of signing a request with a body that will not be - // sent in the request. Need to ensure that the underlying data of the Body - // values are the same. - DisableRequestBodyOverwrite bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time - - // UnsignedPayload will prevent signing of the payload. This will only - // work for services that have support for this. - UnsignedPayload bool -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - DisableURIPathEscaping bool - - credValues credentials.Value - isPresign bool - unsignedPayload bool - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign will set the request's Body to be the `body` parameter passed in. If -// the body is not already an io.ReadCloser, it will be wrapped within one. 
If -// a `nil` body parameter passed to Sign, the request's Body field will be -// also set to nil. Its important to note that this functionality will not -// change the request's ContentLength of the request. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, false, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. 
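
The presign path moves the signature into the query string so the URL itself can be shared. A hedged sketch of that flow, assuming an illustrative bucket, key, region, and static credentials:

```
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	// Presigned S3 GET: the signature is carried in the query string
	// (X-Amz-Signature, X-Amz-Credential, ...) rather than headers, so the
	// URL can be handed to a third party. Bucket, key, and region are placeholders.
	req, _ := http.NewRequest("GET", "https://example-bucket.s3.us-west-2.amazonaws.com/some/key", nil)
	if _, err := signer.Presign(req, nil, "s3", "us-west-2", 15*time.Minute, time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.URL.String()) // shareable URL, valid for 15 minutes
}
```
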
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, true, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: isPresign, - ServiceName: service, - Region: region, - DisableURIPathEscaping: v4.DisableURIPathEscaping, - unsignedPayload: v4.UnsignedPayload, - } - - for key := range ctx.Query { - sort.Strings(ctx.Query[key]) - } - - if ctx.isRequestSigned() { - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) - if err != nil { - return http.Header{}, err - } - - ctx.sanitizeHostForHeader() - ctx.assignAmzQueryValues() - if err := ctx.build(v4.DisableHeaderHoisting); err != nil { - return nil, err - } - - // If the request is not presigned the body should be attached to it. This - // prevents the confusion of wanting to send a signed request without - // the body the request was signed for attached. - if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { - var reader io.ReadCloser - if body != nil { - var ok bool - if reader, ok = body.(io.ReadCloser); !ok { - reader = ioutil.NopCloser(body) - } - } - r.Body = reader - } - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) sanitizeHostForHeader() { - request.SanitizeHostForHeader(ctx.Request) -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. This -// request handler should only be used with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. 
-// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now) -} - -// BuildNamedHandler will build a generic handler for signing. -func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { - return request.NamedHandler{ - Name: name, - Fn: func(req *request.Request) { - SignSDKRequestWithCurrentTime(req, time.Now, opts...) - }, - } -} - -// SignSDKRequestWithCurrentTime will sign the SDK's request using the time -// function passed in. Behaves the same as SignSDKRequest with the exception -// the request is signed with the value returned by the current time function. -func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - if name == "s3" { - // S3 service should not have any escaping applied - v4.DisableURIPathEscaping = true - } - // Prevents setting the HTTPRequest's Body. Since the Body could be - // wrapped in a custom io.Closer that we do not want to be stompped - // on top of by the signer. 
- v4.DisableRequestBodyOverwrite = true - }) - - for _, opt := range opts { - opt(v4) - } - - curTime := curTimeFn() - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, req.ExpireTime > 0, curTime, - ) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTime -} - -const logSignInfoMsg = `DEBUG: Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) error { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - if err := ctx.buildBodyDigest(); err != nil { - return err - } - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - authHeaderSignatureElem + ctx.signature, - } - ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) - } - - return nil -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. 
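
The build step above assembles the Authorization header as `AWS4-HMAC-SHA256 Credential=<key id>/<scope>, SignedHeaders=<h1;h2;...>, Signature=<hex>`, and GetSignedRequestSignature reverses that. A small sketch, again with placeholder credentials and endpoint:

```
package main

import (
	"encoding/hex"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

	body := strings.NewReader("hello")
	req, _ := http.NewRequest("PUT", "https://example-bucket.s3.us-west-2.amazonaws.com/key", body)
	if _, err := signer.Sign(req, body, "s3", "us-west-2", time.Now()); err != nil {
		panic(err)
	}

	// The Authorization header now looks roughly like:
	//   AWS4-HMAC-SHA256 Credential=AKID/<date>/us-west-2/s3/aws4_request,
	//   SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<hex>
	// GetSignedRequestSignature pulls the raw signature back out of either the
	// header or, for presigned requests, the X-Amz-Signature query parameter.
	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(sig))
}
```
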
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") - for _, p := range ps { - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func (ctx *signingCtx) buildTime() { - if ctx.isPresign { - duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - if !r.IsValid(k) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
- continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerItems := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - if ctx.Request.Host != "" { - headerItems[i] = "host:" + ctx.Request.Host - } else { - headerItems[i] = "host:" + ctx.Request.URL.Host - } - } else { - headerValues := make([]string, len(ctx.SignedHeaderVals[k])) - for i, v := range ctx.SignedHeaderVals[k] { - headerValues[i] = strings.TrimSpace(v) - } - headerItems[i] = k + ":" + - strings.Join(headerValues, ",") - } - } - stripExcessSpaces(headerItems) - ctx.canonicalHeaders = strings.Join(headerItems, "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - - uri := getURIPath(ctx.Request.URL) - - if !ctx.DisableURIPathEscaping { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - formatTime(ctx.Time), - ctx.credentialString, - hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) - signature := hmacSHA256(creds, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() error { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - includeSHA256Header := ctx.unsignedPayload || - ctx.ServiceName == "s3" || - ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" - - s3Presign := ctx.isPresign && - (ctx.ServiceName == "s3" || - ctx.ServiceName == "s3-object-lambda") - - if ctx.unsignedPayload || s3Presign { - hash = "UNSIGNED-PAYLOAD" - includeSHA256Header = !s3Presign - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - if !aws.IsReaderSeekable(ctx.Body) { - return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) - } - hashBytes, err := makeSha256Reader(ctx.Body) - if err != nil { - return err - } - hash = hex.EncodeToString(hashBytes) - } - - if includeSHA256Header { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash - - return nil -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
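
buildStringToSign and buildSignature above are plain HMAC-SHA256 arithmetic over the credential scope and the hashed canonical request. A self-contained sketch of that derivation chain; the date, region, service, secret, and canonical-request placeholder are illustrative values, not anything from this patch:

```
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret, shortDate, region, service := "SECRET", "20240920", "us-west-2", "s3"

	// Signing key: an HMAC chain over the date, region, service, and the
	// literal "aws4_request" terminator.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// String to sign: algorithm, timestamp, credential scope, and the SHA-256
	// of the canonical request (a placeholder hash here).
	canonicalRequestHash := sha256.Sum256([]byte("<canonical request>"))
	stringToSign := strings.Join([]string{
		"AWS4-HMAC-SHA256",
		shortDate + "T000000Z",
		shortDate + "/" + region + "/" + service + "/aws4_request",
		hex.EncodeToString(canonicalRequestHash[:]),
	}, "\n")

	signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
	fmt.Println(signature)
}
```
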
-func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func hmacSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func hashSHA256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { - hash := sha256.New() - start, err := reader.Seek(0, sdkio.SeekCurrent) - if err != nil { - return nil, err - } - defer func() { - // ensure error is return if unable to seek back to start of payload. - _, err = reader.Seek(start, sdkio.SeekStart) - }() - - // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies - // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. - size, err := aws.SeekerLen(reader) - if err != nil { - io.Copy(hash, reader) - } else { - io.CopyN(hash, reader, size) - } - - return hash.Sum(nil), nil -} - -const doubleSpace = " " - -// stripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func stripExcessSpaces(vals []string) { - var j, k, l, m, spaces int - for i, str := range vals { - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - vals[i] = str - continue - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - vals[i] = string(buf[:m]) - } -} - -func buildSigningScope(region, service string, dt time.Time) string { - return strings.Join([]string{ - formatShortTime(dt), - region, - service, - awsV4Request, - }, "/") -} - -func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { - kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) - kRegion := hmacSHA256(kDate, []byte(region)) - kService := hmacSHA256(kRegion, []byte(service)) - signingKey := hmacSHA256(kService, []byte(awsV4Request)) - return signingKey -} - -func formatShortTime(dt time.Time) string { - return dt.UTC().Format(shortTimeFormat) -} - -func formatTime(dt time.Time) string { - return dt.UTC().Format(timeFormat) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 98751ee84..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,264 +0,0 @@ -package aws - -import ( - "io" - "strings" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API -// operation's input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if the operation -// requires payload signing. 
-// -// Note: If using With S3 PutObject to stream an object upload The SDK's S3 -// Upload manager (s3manager.Uploader) provides support for streaming with the -// ability to retry network errors. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. -// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} - -// MultiCloser is a utility to close multiple io.Closers within a single -// statement. -type MultiCloser []io.Closer - -// Close closes all of the io.Closers making up the MultiClosers. Any -// errors that occur while closing will be returned in the order they -// occur. -func (m MultiCloser) Close() error { - var errs errors - for _, c := range m { - err := c.Close() - if err != nil { - errs = append(errs, err) - } - } - if len(errs) != 0 { - return errs - } - - return nil -} - -type errors []error - -func (es errors) Error() string { - var parts []string - for _, e := range es { - parts = append(parts, e.Error()) - } - - return strings.Join(parts, "\n") -} - -// CopySeekableBody copies the seekable body to an io.Writer -func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // copy errors may be assumed to be from the body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - // seek back to the first position after reading to reset - // the body for transmission. 
- _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index fed561bd5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.8 -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. -func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 95282db03..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build !go1.8 -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index c8b88d2e4..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.42.15" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go deleted file mode 100644 index 365345353..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package context - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case BackgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -// BackgroundCtx is the common base context. 
-var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go deleted file mode 100644 index e83a99886..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go +++ /dev/null @@ -1,120 +0,0 @@ -package ini - -// ASTKind represents different states in the parse table -// and the type of AST that is being constructed -type ASTKind int - -// ASTKind* is used in the parse table to transition between -// the different states -const ( - ASTKindNone = ASTKind(iota) - ASTKindStart - ASTKindExpr - ASTKindEqualExpr - ASTKindStatement - ASTKindSkipStatement - ASTKindExprStatement - ASTKindSectionStatement - ASTKindNestedSectionStatement - ASTKindCompletedNestedSectionStatement - ASTKindCommentStatement - ASTKindCompletedSectionStatement -) - -func (k ASTKind) String() string { - switch k { - case ASTKindNone: - return "none" - case ASTKindStart: - return "start" - case ASTKindExpr: - return "expr" - case ASTKindStatement: - return "stmt" - case ASTKindSectionStatement: - return "section_stmt" - case ASTKindExprStatement: - return "expr_stmt" - case ASTKindCommentStatement: - return "comment" - case ASTKindNestedSectionStatement: - return "nested_section_stmt" - case ASTKindCompletedSectionStatement: - return "completed_stmt" - case ASTKindSkipStatement: - return "skip" - default: - return "" - } -} - -// AST interface allows us to determine what kind of node we -// are on and casting may not need to be necessary. -// -// The root is always the first node in Children -type AST struct { - Kind ASTKind - Root Token - RootToken bool - Children []AST -} - -func newAST(kind ASTKind, root AST, children ...AST) AST { - return AST{ - Kind: kind, - Children: append([]AST{root}, children...), - } -} - -func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { - return AST{ - Kind: kind, - Root: root, - RootToken: true, - Children: children, - } -} - -// AppendChild will append to the list of children an AST has. -func (a *AST) AppendChild(child AST) { - a.Children = append(a.Children, child) -} - -// GetRoot will return the root AST which can be the first entry -// in the children list or a token. -func (a *AST) GetRoot() AST { - if a.RootToken { - return *a - } - - if len(a.Children) == 0 { - return AST{} - } - - return a.Children[0] -} - -// GetChildren will return the current AST's list of children -func (a *AST) GetChildren() []AST { - if len(a.Children) == 0 { - return []AST{} - } - - if a.RootToken { - return a.Children - } - - return a.Children[1:] -} - -// SetChildren will set and override all children of the AST. -func (a *AST) SetChildren(children []AST) { - if a.RootToken { - a.Children = children - } else { - a.Children = append(a.Children[:1], children...) - } -} - -// Start is used to indicate the starting state of the parse table. 
-var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go deleted file mode 100644 index 0895d53cb..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go +++ /dev/null @@ -1,11 +0,0 @@ -package ini - -var commaRunes = []rune(",") - -func isComma(b rune) bool { - return b == ',' -} - -func newCommaToken() Token { - return newToken(TokenComma, commaRunes, NoneType) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go deleted file mode 100644 index 0b76999ba..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// isComment will return whether or not the next byte(s) is a -// comment. -func isComment(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case ';': - return true - case '#': - return true - } - - return false -} - -// newCommentToken will create a comment token and -// return how many bytes were read. -func newCommentToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if b[i] == '\n' { - break - } - - if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { - break - } - } - - return newToken(TokenComment, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go deleted file mode 100644 index 1e55bbd07..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package ini is an LL(1) parser for configuration files. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -// -// Below is the BNF that describes this parser -// Grammar: -// stmt -> section | stmt' -// stmt' -> epsilon | expr -// expr -> value (stmt)* | equal_expr (stmt)* -// equal_expr -> value ( ':' | '=' ) equal_expr' -// equal_expr' -> number | string | quoted_string -// quoted_string -> " quoted_string' -// quoted_string' -> string quoted_string_end -// quoted_string_end -> " -// -// section -> [ section' -// section' -> section_value section_close -// section_value -> number | string_subset | boolean | quoted_string_subset -// quoted_string_subset -> " quoted_string_subset' -// quoted_string_subset' -> string_subset quoted_string_end -// quoted_string_subset -> " -// section_close -> ] -// -// value -> number | string_subset | boolean -// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? -// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
-// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value -package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go deleted file mode 100644 index 04345a54c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go +++ /dev/null @@ -1,4 +0,0 @@ -package ini - -// emptyToken is used to satisfy the Token interface -var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go deleted file mode 100644 index 91ba2a59d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -// newExpression will return an expression AST. -// Expr represents an expression -// -// grammar: -// expr -> string | number -func newExpression(tok Token) AST { - return newASTWithRootToken(ASTKindExpr, tok) -} - -func newEqualExpr(left AST, tok Token) AST { - return newASTWithRootToken(ASTKindEqualExpr, tok, left) -} - -// EqualExprKey will return a LHS value in the equal expr -func EqualExprKey(ast AST) string { - children := ast.GetChildren() - if len(children) == 0 || ast.Kind != ASTKindEqualExpr { - return "" - } - - return string(children[0].Root.Raw()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go deleted file mode 100644 index 6e545b63b..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go deleted file mode 100644 index 3b0ca7afe..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go +++ /dev/null @@ -1,51 +0,0 @@ -package ini - -import ( - "io" - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// OpenFile takes a path to a given file, and will open and parse -// that file. -func OpenFile(path string) (Sections, error) { - f, err := os.Open(path) - if err != nil { - return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) - } - defer f.Close() - - return Parse(f) -} - -// Parse will parse the given file using the shared config -// visitor. -func Parse(f io.Reader) (Sections, error) { - tree, err := ParseAST(f) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} - -// ParseBytes will parse the given bytes and return the parsed sections. 
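
Parse and ParseBytes feed the lexer and LL(1) parser below, then walk the resulting AST with a visitor to produce Sections. A rough sketch of how the SDK consumes this package; note it is an internal package, so it can only be imported from inside the aws-sdk-go module, and the Section.String accessor is assumed from the wider package rather than the files removed here:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini" // internal: only importable from within the SDK module
)

func main() {
	cfg := []byte("[profile dev]\nregion = us-west-2\noutput = json\n")

	sections, err := ini.ParseBytes(cfg)
	if err != nil {
		panic(err)
	}

	if section, ok := sections.GetSection("profile dev"); ok {
		// Section.String is an assumption here; only GetSection appears in the
		// files removed by this hunk.
		fmt.Println(section.String("region")) // us-west-2
	}
}
```
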
-func ParseBytes(b []byte) (Sections, error) { - tree, err := ParseASTBytes(b) - if err != nil { - return Sections{}, err - } - - v := NewDefaultVisitor() - if err = Walk(tree, v); err != nil { - return Sections{}, err - } - - return v.Sections, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go deleted file mode 100644 index 582c024ad..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go +++ /dev/null @@ -1,165 +0,0 @@ -package ini - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // ErrCodeUnableToReadFile is used when a file is failed to be - // opened or read from. - ErrCodeUnableToReadFile = "FailedRead" -) - -// TokenType represents the various different tokens types -type TokenType int - -func (t TokenType) String() string { - switch t { - case TokenNone: - return "none" - case TokenLit: - return "literal" - case TokenSep: - return "sep" - case TokenOp: - return "op" - case TokenWS: - return "ws" - case TokenNL: - return "newline" - case TokenComment: - return "comment" - case TokenComma: - return "comma" - default: - return "" - } -} - -// TokenType enums -const ( - TokenNone = TokenType(iota) - TokenLit - TokenSep - TokenComma - TokenOp - TokenWS - TokenNL - TokenComment -) - -type iniLexer struct{} - -// Tokenize will return a list of tokens during lexical analysis of the -// io.Reader. -func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) - } - - return l.tokenize(b) -} - -func (l *iniLexer) tokenize(b []byte) ([]Token, error) { - runes := bytes.Runes(b) - var err error - n := 0 - tokenAmount := countTokens(runes) - tokens := make([]Token, tokenAmount) - count := 0 - - for len(runes) > 0 && count < tokenAmount { - switch { - case isWhitespace(runes[0]): - tokens[count], n, err = newWSToken(runes) - case isComma(runes[0]): - tokens[count], n = newCommaToken(), 1 - case isComment(runes): - tokens[count], n, err = newCommentToken(runes) - case isNewline(runes): - tokens[count], n, err = newNewlineToken(runes) - case isSep(runes): - tokens[count], n, err = newSepToken(runes) - case isOp(runes): - tokens[count], n, err = newOpToken(runes) - default: - tokens[count], n, err = newLitToken(runes) - } - - if err != nil { - return nil, err - } - - count++ - - runes = runes[n:] - } - - return tokens[:count], nil -} - -func countTokens(runes []rune) int { - count, n := 0, 0 - var err error - - for len(runes) > 0 { - switch { - case isWhitespace(runes[0]): - _, n, err = newWSToken(runes) - case isComma(runes[0]): - _, n = newCommaToken(), 1 - case isComment(runes): - _, n, err = newCommentToken(runes) - case isNewline(runes): - _, n, err = newNewlineToken(runes) - case isSep(runes): - _, n, err = newSepToken(runes) - case isOp(runes): - _, n, err = newOpToken(runes) - default: - _, n, err = newLitToken(runes) - } - - if err != nil { - return 0 - } - - count++ - runes = runes[n:] - } - - return count + 1 -} - -// Token indicates a metadata about a given value. 
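
The lexer simply classifies runes into whitespace, comma, comment, newline, separator, operator, and literal tokens. A rough in-package sketch of that behavior (iniLexer and its methods are unexported, so this only runs from inside the ini package; the input line is illustrative):

```
// From inside package ini only; iniLexer is unexported.
lex := iniLexer{}
tokens, err := lex.tokenize([]byte("region = us-west-2\n"))
if err != nil {
	panic(err)
}
for _, tok := range tokens {
	// Expect roughly: literal "region", ws " ", op "=", ws " ",
	// literal "us-west-2", newline "\n".
	fmt.Printf("%s %q\n", tok.Type(), string(tok.Raw()))
}
```
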
-type Token struct { - t TokenType - ValueType ValueType - base int - raw []rune -} - -var emptyValue = Value{} - -func newToken(t TokenType, raw []rune, v ValueType) Token { - return Token{ - t: t, - raw: raw, - ValueType: v, - } -} - -// Raw return the raw runes that were consumed -func (tok Token) Raw() []rune { - return tok.raw -} - -// Type returns the token type -func (tok Token) Type() TokenType { - return tok.t -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go deleted file mode 100644 index 0ba319491..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ /dev/null @@ -1,350 +0,0 @@ -package ini - -import ( - "fmt" - "io" -) - -// ParseState represents the current state of the parser. -type ParseState uint - -// State enums for the parse table -const ( - InvalidState ParseState = iota - // stmt -> value stmt' - StatementState - // stmt' -> MarkComplete | op stmt - StatementPrimeState - // value -> number | string | boolean | quoted_string - ValueState - // section -> [ section' - OpenScopeState - // section' -> value section_close - SectionState - // section_close -> ] - CloseScopeState - // SkipState will skip (NL WS)+ - SkipState - // SkipTokenState will skip any token and push the previous - // state onto the stack. - SkipTokenState - // comment -> # comment' | ; comment' - // comment' -> MarkComplete | value - CommentState - // MarkComplete state will complete statements and move that - // to the completed AST list - MarkCompleteState - // TerminalState signifies that the tokens have been fully parsed - TerminalState -) - -// parseTable is a state machine to dictate the grammar above. -var parseTable = map[ASTKind]map[TokenType]ParseState{ - ASTKindStart: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: TerminalState, - }, - ASTKindCommentStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExpr: { - TokenOp: StatementPrimeState, - TokenLit: ValueState, - TokenSep: OpenScopeState, - TokenWS: ValueState, - TokenNL: SkipState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindEqualExpr: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - TokenNone: SkipState, - }, - ASTKindStatement: { - TokenLit: SectionState, - TokenSep: CloseScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindExprStatement: { - TokenLit: ValueState, - TokenSep: ValueState, - TokenOp: ValueState, - TokenWS: ValueState, - TokenNL: MarkCompleteState, - TokenComment: CommentState, - TokenNone: TerminalState, - TokenComma: SkipState, - }, - ASTKindSectionStatement: { - TokenLit: SectionState, - TokenOp: SectionState, - TokenSep: CloseScopeState, - TokenWS: SectionState, - TokenNL: SkipTokenState, - }, - ASTKindCompletedSectionStatement: { - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenComment: CommentState, - TokenNone: MarkCompleteState, - }, - ASTKindSkipStatement: { - TokenLit: StatementState, - TokenSep: OpenScopeState, - TokenWS: SkipTokenState, - TokenNL: SkipTokenState, - TokenComment: 
CommentState, - TokenNone: TerminalState, - }, -} - -// ParseAST will parse input from an io.Reader using -// an LL(1) parser. -func ParseAST(r io.Reader) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.Tokenize(r) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -// ParseASTBytes will parse input from a byte slice using -// an LL(1) parser. -func ParseASTBytes(b []byte) ([]AST, error) { - lexer := iniLexer{} - tokens, err := lexer.tokenize(b) - if err != nil { - return []AST{}, err - } - - return parse(tokens) -} - -func parse(tokens []Token) ([]AST, error) { - start := Start - stack := newParseStack(3, len(tokens)) - - stack.Push(start) - s := newSkipper() - -loop: - for stack.Len() > 0 { - k := stack.Pop() - - var tok Token - if len(tokens) == 0 { - // this occurs when all the tokens have been processed - // but reduction of what's left on the stack needs to - // occur. - tok = emptyToken - } else { - tok = tokens[0] - } - - step := parseTable[k.Kind][tok.Type()] - if s.ShouldSkip(tok) { - // being in a skip state with no tokens will break out of - // the parse loop since there is nothing left to process. - if len(tokens) == 0 { - break loop - } - // if should skip is true, we skip the tokens until should skip is set to false. - step = SkipTokenState - } - - switch step { - case TerminalState: - // Finished parsing. Push what should be the last - // statement to the stack. If there is anything left - // on the stack, an error in parsing has occurred. - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - break loop - case SkipTokenState: - // When skipping a token, the previous state was popped off the stack. - // To maintain the correct state, the previous state will be pushed - // onto the stack. - stack.Push(k) - case StatementState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - expr := newExpression(tok) - stack.Push(expr) - case StatementPrimeState: - if tok.Type() != TokenOp { - stack.MarkComplete(k) - continue - } - - if k.Kind != ASTKindExpr { - return nil, NewParseError( - fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), - ) - } - - k = trimSpaces(k) - expr := newEqualExpr(k, tok) - stack.Push(expr) - case ValueState: - // ValueState requires the previous state to either be an equal expression - // or an expression statement. - switch k.Kind { - case ASTKindEqualExpr: - // assigning a value to some key - k.AppendChild(newExpression(tok)) - stack.Push(newExprStatement(k)) - case ASTKindExpr: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stack.Push(k) - case ASTKindExprStatement: - root := k.GetRoot() - children := root.GetChildren() - if len(children) == 0 { - return nil, NewParseError( - fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), - ) - } - - rhs := children[len(children)-1] - - if rhs.Root.ValueType != QuotedStringType { - rhs.Root.ValueType = StringType - rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
- - } - - children[len(children)-1] = rhs - root.SetChildren(children) - - stack.Push(k) - } - case OpenScopeState: - if !runeCompare(tok.Raw(), openBrace) { - return nil, NewParseError("expected '['") - } - // If OpenScopeState is not at the start, we must mark the previous ast as complete - // - // for example: if previous ast was a skip statement; - // we should mark it as complete before we create a new statement - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - stmt := newStatement() - stack.Push(stmt) - case CloseScopeState: - if !runeCompare(tok.Raw(), closeBrace) { - return nil, NewParseError("expected ']'") - } - - k = trimSpaces(k) - stack.Push(newCompletedSectionStatement(k)) - case SectionState: - var stmt AST - - switch k.Kind { - case ASTKindStatement: - // If there are multiple literals inside of a scope declaration, - // then the current token's raw value will be appended to the Name. - // - // This handles cases like [ profile default ] - // - // k will represent a SectionStatement with the children representing - // the label of the section - stmt = newSectionStatement(tok) - case ASTKindSectionStatement: - k.Root.raw = append(k.Root.raw, tok.Raw()...) - stmt = k - default: - return nil, NewParseError( - fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), - ) - } - - stack.Push(stmt) - case MarkCompleteState: - if k.Kind != ASTKindStart { - stack.MarkComplete(k) - } - - if stack.Len() == 0 { - stack.Push(start) - } - case SkipState: - stack.Push(newSkipStatement(k)) - s.Skip() - case CommentState: - if k.Kind == ASTKindStart { - stack.Push(k) - } else { - stack.MarkComplete(k) - } - - stmt := newCommentStatement(tok) - stack.Push(stmt) - default: - return nil, NewParseError( - fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", - k, tok.Type())) - } - - if len(tokens) > 0 { - tokens = tokens[1:] - } - } - - // this occurs when a statement has not been completed - if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) - } - - // returns a sublist which excludes the start symbol - return stack.List(), nil -} - -// trimSpaces will trim spaces on the left and right hand side of -// the literal. 
-func trimSpaces(k AST) AST { - // trim left hand side of spaces - for i := 0; i < len(k.Root.raw); i++ { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[1:] - i-- - } - - // trim right hand side of spaces - for i := len(k.Root.raw) - 1; i >= 0; i-- { - if !isWhitespace(k.Root.raw[i]) { - break - } - - k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] - } - - return k -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go deleted file mode 100644 index 34a481afb..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ /dev/null @@ -1,340 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -var ( - runesTrue = []rune("true") - runesFalse = []rune("false") -) - -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isCaselessLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - -// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. -func isCaselessLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != unicode.ToLower(have[i]) { - return false - } - } - - return true -} - -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - -func isValid(b []rune) (bool, int, error) { - if len(b) == 0 { - // TODO: should probably return an error - return false, 0, nil - } - - return isValidRune(b[0]), 1, nil -} - -func isValidRune(r rune) bool { - return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' -} - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" - case StringType: - return "STRING" - case BoolType: - return "BOOL" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - DecimalType - IntegerType - StringType - QuotedStringType - BoolType -) - -// Value is a union container -type Value struct { - Type ValueType - raw []rune - - integer int64 - decimal float64 - boolean bool - str string -} - -func newValue(t ValueType, base int, raw []rune) (Value, error) { - v := Value{ - Type: t, - raw: raw, - } - var err error - - switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) - case StringType: - v.str = string(raw) - case QuotedStringType: - v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = isCaselessLitValue(runesTrue, v.raw) - } - - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err -} - -// Append will append values and change the type to a string -// type. -func (v *Value) Append(tok Token) { - r := tok.Raw() - if v.Type != QuotedStringType { - v.Type = StringType - r = tok.raw[1 : len(tok.raw)-1] - } - if tok.Type() != TokenLit { - v.raw = append(v.raw, tok.Raw()...) - } else { - v.raw = append(v.raw, r...) 
- } -} - -func (v Value) String() string { - switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) - case StringType: - return fmt.Sprintf("string: %s", string(v.raw)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) - default: - return "union not set" - } -} - -func newLitToken(b []rune) (Token, int, error) { - n := 0 - var err error - - token := Token{} - if b[0] == '"' { - n, err = getStringValue(b) - if err != nil { - return token, n, err - } - - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) - } else { - n, err = getValue(b) - token = newToken(TokenLit, b[:n], StringType) - } - - return token, n, err -} - -// IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer -} - -// FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal -} - -// BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean -} - -func isTrimmable(r rune) bool { - switch r { - case '\n', ' ': - return true - } - return false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - switch v.Type { - case StringType: - return strings.TrimFunc(string(v.raw), isTrimmable) - case QuotedStringType: - // preserve all characters in the quotes - return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) - default: - return strings.TrimFunc(string(v.raw), isTrimmable) - } -} - -func contains(runes []rune, c rune) bool { - for i := 0; i < len(runes); i++ { - if runes[i] == c { - return true - } - } - - return false -} - -func runeCompare(v1 []rune, v2 []rune) bool { - if len(v1) != len(v2) { - return false - } - - for i := 0; i < len(v1); i++ { - if v1[i] != v2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go deleted file mode 100644 index e52ac399f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go +++ /dev/null @@ -1,30 +0,0 @@ -package ini - -func isNewline(b []rune) bool { - if len(b) == 0 { - return false - } - - if b[0] == '\n' { - return true - } - - if len(b) < 2 { - return false - } - - return b[0] == '\r' && b[1] == '\n' -} - -func newNewlineToken(b []rune) (Token, int, error) { - i := 1 - if b[0] == '\r' && isNewline(b[1:]) { - i++ - } - - if !isNewline([]rune(b[:i])) { - return emptyToken, 0, NewParseError("invalid new line token") - } - - return newToken(TokenNL, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc56..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - 
-type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. -type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go deleted file mode 100644 index 8a84c7cbe..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go +++ /dev/null @@ -1,39 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - equalOp = []rune("=") - equalColonOp = []rune(":") -) - -func isOp(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '=': - return true - case ':': - return true - default: - return false - } -} - -func newOpToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '=': - tok = newToken(TokenOp, equalOp, NoneType) - case ':': - tok = newToken(TokenOp, equalColonOp, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go deleted file mode 100644 index 457287019..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import "fmt" - -const ( - // ErrCodeParseError is returned when a parsing error - // has occurred. - ErrCodeParseError = "INIParseError" -) - -// ParseError is an error which is returned during any part of -// the parsing process. -type ParseError struct { - msg string -} - -// NewParseError will return a new ParseError where message -// is the description of the error. -func NewParseError(message string) *ParseError { - return &ParseError{ - msg: message, - } -} - -// Code will return the ErrCodeParseError -func (err *ParseError) Code() string { - return ErrCodeParseError -} - -// Message returns the error's message -func (err *ParseError) Message() string { - return err.msg -} - -// OrigError return nothing since there will never be any -// original error. -func (err *ParseError) OrigError() error { - return nil -} - -func (err *ParseError) Error() string { - return fmt.Sprintf("%s: %s", err.Code(), err.Message()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go deleted file mode 100644 index 7f01cf7c7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go +++ /dev/null @@ -1,60 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" -) - -// ParseStack is a stack that contains a container, the stack portion, -// and the list which is the list of ASTs that have been successfully -// parsed. -type ParseStack struct { - top int - container []AST - list []AST - index int -} - -func newParseStack(sizeContainer, sizeList int) ParseStack { - return ParseStack{ - container: make([]AST, sizeContainer), - list: make([]AST, sizeList), - } -} - -// Pop will return and truncate the last container element. 
-func (s *ParseStack) Pop() AST { - s.top-- - return s.container[s.top] -} - -// Push will add the new AST to the container -func (s *ParseStack) Push(ast AST) { - s.container[s.top] = ast - s.top++ -} - -// MarkComplete will append the AST to the list of completed statements -func (s *ParseStack) MarkComplete(ast AST) { - s.list[s.index] = ast - s.index++ -} - -// List will return the completed statements -func (s ParseStack) List() []AST { - return s.list[:s.index] -} - -// Len will return the length of the container -func (s *ParseStack) Len() int { - return s.top -} - -func (s ParseStack) String() string { - buf := bytes.Buffer{} - for i, node := range s.list { - buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go deleted file mode 100644 index f82095ba2..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go +++ /dev/null @@ -1,41 +0,0 @@ -package ini - -import ( - "fmt" -) - -var ( - emptyRunes = []rune{} -) - -func isSep(b []rune) bool { - if len(b) == 0 { - return false - } - - switch b[0] { - case '[', ']': - return true - default: - return false - } -} - -var ( - openBrace = []rune("[") - closeBrace = []rune("]") -) - -func newSepToken(b []rune) (Token, int, error) { - tok := Token{} - - switch b[0] { - case '[': - tok = newToken(TokenSep, openBrace, NoneType) - case ']': - tok = newToken(TokenSep, closeBrace, NoneType) - default: - return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) - } - return tok, 1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go deleted file mode 100644 index da7a4049c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ /dev/null @@ -1,45 +0,0 @@ -package ini - -// skipper is used to skip certain blocks of an ini file. -// Currently skipper is used to skip nested blocks of ini -// files. See example below -// -// [ foo ] -// nested = ; this section will be skipped -// a=b -// c=d -// bar=baz ; this will be included -type skipper struct { - shouldSkip bool - TokenSet bool - prevTok Token -} - -func newSkipper() skipper { - return skipper{ - prevTok: emptyToken, - } -} - -func (s *skipper) ShouldSkip(tok Token) bool { - // should skip state will be modified only if previous token was new line (NL); - // and the current token is not WhiteSpace (WS). - if s.shouldSkip && - s.prevTok.Type() == TokenNL && - tok.Type() != TokenWS { - s.Continue() - return false - } - s.prevTok = tok - return s.shouldSkip -} - -func (s *skipper) Skip() { - s.shouldSkip = true -} - -func (s *skipper) Continue() { - s.shouldSkip = false - // empty token is assigned as we return to default state, when should skip is false - s.prevTok = emptyToken -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go deleted file mode 100644 index 18f3fe893..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go +++ /dev/null @@ -1,35 +0,0 @@ -package ini - -// Statement is an empty AST mostly used for transitioning states. 
-func newStatement() AST { - return newAST(ASTKindStatement, AST{}) -} - -// SectionStatement represents a section AST -func newSectionStatement(tok Token) AST { - return newASTWithRootToken(ASTKindSectionStatement, tok) -} - -// ExprStatement represents a completed expression AST -func newExprStatement(ast AST) AST { - return newAST(ASTKindExprStatement, ast) -} - -// CommentStatement represents a comment in the ini definition. -// -// grammar: -// comment -> #comment' | ;comment' -// comment' -> epsilon | value -func newCommentStatement(tok Token) AST { - return newAST(ASTKindCommentStatement, newExpression(tok)) -} - -// CompletedSectionStatement represents a completed section -func newCompletedSectionStatement(ast AST) AST { - return newAST(ASTKindCompletedSectionStatement, ast) -} - -// SkipStatement is used to skip whole statements -func newSkipStatement(ast AST) AST { - return newAST(ASTKindSkipStatement, ast) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go deleted file mode 100644 index b5480fdeb..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go +++ /dev/null @@ -1,284 +0,0 @@ -package ini - -import ( - "fmt" -) - -// getStringValue will return a quoted string and the amount -// of bytes read -// -// an error will be returned if the string is not properly formatted -func getStringValue(b []rune) (int, error) { - if b[0] != '"' { - return 0, NewParseError("strings must start with '\"'") - } - - endQuote := false - i := 1 - - for ; i < len(b) && !endQuote; i++ { - if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { - endQuote = true - break - } else if escaped { - /*c, err := getEscapedByte(b[i]) - if err != nil { - return 0, err - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) 
- i--*/ - - continue - } - } - - if !endQuote { - return 0, NewParseError("missing '\"' in string value") - } - - return i + 1, nil -} - -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isCaselessLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -func getValue(b []rune) (int, error) { - i := 0 - - for i < len(b) { - if isNewline(b[i:]) { - break - } - - if isOp(b[i:]) { - break - } - - valid, n, err := isValid(b[i:]) - if err != nil { - return 0, err - } - - if !valid { - break - } - - i += n - } - - return i, nil -} - -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. 
-func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - -// isEscaped will return whether or not the character is an escaped -// character. -func isEscaped(value []rune, b rune) bool { - if len(value) == 0 { - return false - } - - switch b { - case '\'': // single quote - case '"': // quote - case 'n': // newline - case 't': // tab - case '\\': // backslash - default: - return false - } - - return value[len(value)-1] == '\\' -} - -func getEscapedByte(b rune) (rune, error) { - switch b { - case '\'': // single quote - return '\'', nil - case '"': // quote - return '"', nil - case 'n': // newline - return '\n', nil - case 't': // table - return '\t', nil - case '\\': // backslash - return '\\', nil - default: - return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) - } -} - -func removeEscapedCharacters(b []rune) []rune { - for i := 0; i < len(b); i++ { - if isEscaped(b[:i], b[i]) { - c, err := getEscapedByte(b[i]) - if err != nil { - return b - } - - b[i-1] = c - b = append(b[:i], b[i+1:]...) - i-- - } - } - - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go deleted file mode 100644 index 081cf4334..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ /dev/null @@ -1,169 +0,0 @@ -package ini - -import ( - "fmt" - "sort" -) - -// Visitor is an interface used by walkers that will -// traverse an array of ASTs. -type Visitor interface { - VisitExpr(AST) error - VisitStatement(AST) error -} - -// DefaultVisitor is used to visit statements and expressions -// and ensure that they are both of the correct format. -// In addition, upon visiting this will build sections and populate -// the Sections field which can be used to retrieve profile -// configuration. -type DefaultVisitor struct { - scope string - Sections Sections -} - -// NewDefaultVisitor return a DefaultVisitor -func NewDefaultVisitor() *DefaultVisitor { - return &DefaultVisitor{ - Sections: Sections{ - container: map[string]Section{}, - }, - } -} - -// VisitExpr visits expressions... -func (v *DefaultVisitor) VisitExpr(expr AST) error { - t := v.Sections.container[v.scope] - if t.values == nil { - t.values = values{} - } - - switch expr.Kind { - case ASTKindExprStatement: - opExpr := expr.GetRoot() - switch opExpr.Kind { - case ASTKindEqualExpr: - children := opExpr.GetChildren() - if len(children) <= 1 { - return NewParseError("unexpected token type") - } - - rhs := children[1] - - // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. - // If the token is not either a literal or one of the token types that identifies those four additional - // tokens then error. - if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { - return NewParseError("unexpected token type") - } - - key := EqualExprKey(opExpr) - v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) - if err != nil { - return err - } - - t.values[key] = v - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - default: - return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) - } - - v.Sections.container[v.scope] = t - return nil -} - -// VisitStatement visits statements... 
-func (v *DefaultVisitor) VisitStatement(stmt AST) error { - switch stmt.Kind { - case ASTKindCompletedSectionStatement: - child := stmt.GetRoot() - if child.Kind != ASTKindSectionStatement { - return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) - } - - name := string(child.Root.Raw()) - v.Sections.container[name] = Section{} - v.scope = name - default: - return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) - } - - return nil -} - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - Name string - values values -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) bool { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) int64 { - return t.values[k].IntValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) float64 { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go deleted file mode 100644 index 99915f7f7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go +++ /dev/null @@ -1,25 +0,0 @@ -package ini - -// Walk will traverse the AST using the v, the Visitor. -func Walk(tree []AST, v Visitor) error { - for _, node := range tree { - switch node.Kind { - case ASTKindExpr, - ASTKindExprStatement: - - if err := v.VisitExpr(node); err != nil { - return err - } - case ASTKindStatement, - ASTKindCompletedSectionStatement, - ASTKindNestedSectionStatement, - ASTKindCompletedNestedSectionStatement: - - if err := v.VisitStatement(node); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go deleted file mode 100644 index 7ffb4ae06..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go +++ /dev/null @@ -1,24 +0,0 @@ -package ini - -import ( - "unicode" -) - -// isWhitespace will return whether or not the character is -// a whitespace character. -// -// Whitespace is defined as a space or tab. 
-func isWhitespace(c rune) bool { - return unicode.IsSpace(c) && c != '\n' && c != '\r' -} - -func newWSToken(b []rune) (Token, int, error) { - i := 0 - for ; i < len(b); i++ { - if !isWhitespace(b[i]) { - break - } - } - - return newToken(TokenWS, b[:i], NoneType), i, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go deleted file mode 100644 index 6c443988b..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go deleted file mode 100644 index 037a998c4..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !go1.7 -// +build !go1.7 - -package sdkio - -// Copy of Go 1.7 io package's Seeker constants. -const ( - SeekStart = 0 // seek relative to the origin of the file - SeekCurrent = 1 // seek relative to the current offset - SeekEnd = 2 // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go deleted file mode 100644 index 65e7c60c4..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.7 -// +build go1.7 - -package sdkio - -import "io" - -// Alias for Go 1.7 io package Seeker constants -const ( - SeekStart = io.SeekStart // seek relative to the origin of the file - SeekCurrent = io.SeekCurrent // seek relative to the current offset - SeekEnd = io.SeekEnd // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go deleted file mode 100644 index a84528783..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.10 -// +build go1.10 - -package sdkmath - -import "math" - -// Round returns the nearest integer, rounding half away from zero. -// -// Special cases are: -// Round(±0) = ±0 -// Round(±Inf) = ±Inf -// Round(NaN) = NaN -func Round(x float64) float64 { - return math.Round(x) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go deleted file mode 100644 index a3ae3e5db..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build !go1.10 -// +build !go1.10 - -package sdkmath - -import "math" - -// Copied from the Go standard library's (Go 1.12) math/floor.go for use in -// Go version prior to Go 1.10. -const ( - uvone = 0x3FF0000000000000 - mask = 0x7FF - shift = 64 - 11 - 1 - bias = 1023 - signMask = 1 << 63 - fracMask = 1<= 0.5 { - // return t + Copysign(1, x) - // } - // return t - // } - bits := math.Float64bits(x) - e := uint(bits>>shift) & mask - if e < bias { - // Round abs(x) < 1 including denormals. - bits &= signMask // +-0 - if e == bias-1 { - bits |= uvone // +-1 - } - } else if e < bias+shift { - // Round any abs(x) >= 1 containing a fractional component [0,1). 
- // - // Numbers with larger exponents are returned unchanged since they - // must be either an integer, infinity, or NaN. - const half = 1 << (shift - 1) - e -= bias - bits += half >> e - bits &^= fracMask >> e - } - return math.Float64frombits(bits) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d87..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go deleted file mode 100644 index 4bae66cee..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.6 -// +build go1.6 - -package sdkrand - -import "math/rand" - -// Read provides the stub for math.Rand.Read method support for go version's -// 1.6 and greater. -func Read(r *rand.Rand, p []byte) (int, error) { - return r.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go deleted file mode 100644 index 3a6ab8825..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !go1.6 -// +build !go1.6 - -package sdkrand - -import "math/rand" - -// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 -func Read(r *rand.Rand, p []byte) (n int, err error) { - // Copy of Go standard libraries math package's read function not added to - // standard library until Go 1.6. - var pos int8 - var val int64 - for n = 0; n < len(p); n++ { - if pos == 0 { - val = r.Int63() - pos = 7 - } - p[n] = byte(val) - val >>= 8 - pos-- - } - - return n, err -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61afe..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) 
- if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go deleted file mode 100644 index 7da8a49ce..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go +++ /dev/null @@ -1,12 +0,0 @@ -package shareddefaults - -const ( - // ECSCredsProviderEnvVar is an environmental variable key used to - // determine which path needs to be hit. - ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" -) - -// ECSContainerCredentialsURI is the endpoint to retrieve container -// credentials. This can be overridden to test to ensure the credential process -// is behaving correctly. -var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b40..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") - } - - // *nix - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go deleted file mode 100644 index d008ae27c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index 14ad0c589..000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package singleflight provides a duplicate function call suppression -// mechanism. 
-package singleflight - -import "sync" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - if !c.forgotten { - delete(g.m, key) - } - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - g.mu.Unlock() -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go deleted file mode 100644 index 1f1d27aea..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ /dev/null @@ -1,104 +0,0 @@ -package protocol - -import ( - "github.com/aws/aws-sdk-go/aws/request" - "net" - "strconv" - "strings" -) - -// ValidateEndpointHostHandler is a request handler that will validate the -// request endpoint's hosts is a valid RFC 3986 host. 
-var ValidateEndpointHostHandler = request.NamedHandler{ - Name: "awssdk.protocol.ValidateEndpointHostHandler", - Fn: func(r *request.Request) { - err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) - if err != nil { - r.Error = err - } - }, -} - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(opName, host string) error { - paramErrs := request.ErrInvalidParams{Context: opName} - - var hostname string - var port string - var err error - - if strings.Contains(host, ":") { - hostname, port, err = net.SplitHostPort(host) - - if err != nil { - paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) - } - - if !ValidPortNumber(port) { - paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) - } - } else { - hostname = host - } - - labels := strings.Split(hostname, ".") - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - paramErrs.Add(request.NewErrParamFormat( - "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) - } - } - - if len(hostname) == 0 { - paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) - } - - if len(hostname) > 255 { - paramErrs.Add(request.NewErrParamMaxLen( - "endpoint host", 255, host, - )) - } - - if paramErrs.Len() > 0 { - return paramErrs - } - return nil -} - -// ValidHostLabel returns if the label is a valid RFC 3986 host label. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} - -// ValidPortNumber return if the port is valid RFC 3986 port -func ValidPortNumber(port string) bool { - i, err := strconv.Atoi(port) - if err != nil { - return false - } - - if i < 0 || i > 65535 { - return false - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go deleted file mode 100644 index 915b0fcaf..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go +++ /dev/null @@ -1,54 +0,0 @@ -package protocol - -import ( - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// HostPrefixHandlerName is the handler name for the host prefix request -// handler. -const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" - -// NewHostPrefixHandler constructs a build handler -func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { - builder := HostPrefixBuilder{ - Prefix: prefix, - LabelsFn: labelsFn, - } - - return request.NamedHandler{ - Name: HostPrefixHandlerName, - Fn: builder.Build, - } -} - -// HostPrefixBuilder provides the request handler to expand and prepend -// the host prefix into the operation's request endpoint host. -type HostPrefixBuilder struct { - Prefix string - LabelsFn func() map[string]string -} - -// Build updates the passed in Request with the HostPrefix template expanded. 
-func (h HostPrefixBuilder) Build(r *request.Request) { - if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { - return - } - - var labels map[string]string - if h.LabelsFn != nil { - labels = h.LabelsFn() - } - - prefix := h.Prefix - for name, value := range labels { - prefix = strings.Replace(prefix, "{"+name+"}", value, -1) - } - - r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host - if len(r.HTTPRequest.Host) > 0 { - r.HTTPRequest.Host = prefix + r.HTTPRequest.Host - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff9..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. -func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 2aec80661..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,298 +0,0 @@ -// Package jsonutil provides JSON serialization of AWS requests and responses. 
-package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - origVal := value - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(origVal, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - if !value.IsValid() && tag.Get("type") != "structure" { - return nil - } - } - - buf.WriteByte('{') - defer buf.WriteString("}") - - if !value.IsValid() { - return nil - } - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - - // This allocates the most memory. - // Additionally, we cannot skip nil fields due to - // idempotency auto filling. 
- field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - sv := sortedValues(value.MapKeys()) - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - // prevents allocation on the heap. - scratch := [64]byte{} - switch value := reflect.Indirect(v); value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int64: - buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) - case reflect.Float64: - f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} - } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) - default: - switch converted := value.Interface().(type) { - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.UnixTimeFormatName - } - - ts := protocol.FormatTime(format, converted) - if format != protocol.UnixTimeFormatName { - ts = `"` + ts + `"` - } - - buf.WriteString(ts) - case []byte: - if !value.IsNil() { - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. 
- enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - case aws.JSONValue: - str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) - if err != nil { - return fmt.Errorf("unable to encode JSONValue, %v", err) - } - buf.WriteString(str) - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -var hex = "0123456789abcdef" - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for i := 0; i < len(s); i++ { - if s[i] == '"' { - buf.WriteString(`\"`) - } else if s[i] == '\\' { - buf.WriteString(`\\`) - } else if s[i] == '\b' { - buf.WriteString(`\b`) - } else if s[i] == '\f' { - buf.WriteString(`\f`) - } else if s[i] == '\r' { - buf.WriteString(`\r`) - } else if s[i] == '\t' { - buf.WriteString(`\t`) - } else if s[i] == '\n' { - buf.WriteString(`\n`) - } else if s[i] < 32 { - buf.WriteString("\\u00") - buf.WriteByte(hex[s[i]>>4]) - buf.WriteByte(hex[s[i]&0xF]) - } else { - buf.WriteByte(s[i]) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index 8b2c9bbeb..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,304 +0,0 @@ -package jsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math/big" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -var millisecondsFloat = new(big.Float).SetInt64(1e3) - -// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in -// type. The value to unmarshal the json document into must be a pointer to the -// type. -func UnmarshalJSONError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := json.NewDecoder(body).Decode(v) - if err != nil { - msg := "failed decoding error message" - if err == io.EOF { - msg = "error message missing" - err = nil - } - return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) - } - - return nil -} - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - decoder := json.NewDecoder(stream) - decoder.UseNumber() - err := decoder.Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") -} - -// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the -// object v. Ignores casing for structure members. 
-func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { - var out interface{} - - decoder := json.NewDecoder(stream) - decoder.UseNumber() - err := decoder.Decode(&out) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - return unmarshaler{ - caseInsensitive: true, - }.unmarshalAny(reflect.ValueOf(v), out, "") -} - -type unmarshaler struct { - caseInsensitive bool -} - -func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - // cannot be a JSONValue map - if _, ok := value.Interface().(aws.JSONValue); !ok { - t = "map" - } - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return u.unmarshalStruct(value, data, tag) - case "list": - return u.unmarshalList(value, data, tag) - case "map": - return u.unmarshalMap(value, data, tag) - default: - return u.unmarshalScalar(value, data, tag) - } -} - -func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if u.caseInsensitive { - if _, ok := mapData[name]; !ok { - // Fallback to uncased name search if the exact name didn't match. 
- for kn, v := range mapData { - if strings.EqualFold(kn, name) { - mapData[name] = v - } - } - } - } - - member := value.FieldByIndex(field.Index) - err := u.unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := u.unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - u.unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - // No need to use escaping as the value is a non-quoted string. 
- v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) - if err != nil { - return err - } - value.Set(reflect.ValueOf(v)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case json.Number: - switch value.Interface().(type) { - case *int64: - // Retain the old behavior where we would just truncate the float64 - // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt - f, err := d.Float64() - if err != nil { - return err - } - di := int64(f) - value.Set(reflect.ValueOf(&di)) - case *float64: - f, err := d.Float64() - if err != nil { - return err - } - value.Set(reflect.ValueOf(&f)) - case *time.Time: - float, ok := new(big.Float).SetString(d.String()) - if !ok { - return fmt.Errorf("unsupported float time representation: %v", d.String()) - } - float = float.Mul(float, millisecondsFloat) - ms, _ := float.Int64() - t := time.Unix(0, ms*1e6).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go deleted file mode 100644 index d9aa27114..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package jsonrpc provides JSON RPC utilities for serialization of AWS -// requests and responses. -package jsonrpc - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -var emptyJSON = []byte("{}") - -// BuildHandler is a named request handler for building jsonrpc protocol -// requests -var BuildHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.Build", - Fn: Build, -} - -// UnmarshalHandler is a named request handler for unmarshaling jsonrpc -// protocol requests -var UnmarshalHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.Unmarshal", - Fn: Unmarshal, -} - -// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc -// protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.UnmarshalMeta", - Fn: UnmarshalMeta, -} - -// Build builds a JSON payload for a JSON RPC request. -func Build(req *request.Request) { - var buf []byte - var err error - if req.ParamsFilled() { - buf, err = jsonutil.BuildJSON(req.Params) - if err != nil { - req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) - return - } - } else { - buf = emptyJSON - } - - // Always serialize the body, don't suppress it. - req.SetBufferBody(buf) - - if req.ClientInfo.TargetPrefix != "" { - target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name - req.HTTPRequest.Header.Add("X-Amz-Target", target) - } - - // Only set the content type if one is not already specified and an - // JSONVersion is specified. - if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 { - jsonVersion := req.ClientInfo.JSONVersion - req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion) - } -} - -// Unmarshal unmarshals a response for a JSON RPC service. -func Unmarshal(req *request.Request) { - defer req.HTTPResponse.Body.Close() - if req.DataFilled() { - err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), - req.HTTPResponse.StatusCode, - req.RequestID, - ) - } - } - return -} - -// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. -func UnmarshalMeta(req *request.Request) { - rest.UnmarshalMeta(req) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go deleted file mode 100644 index c0c52e2db..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go +++ /dev/null @@ -1,107 +0,0 @@ -package jsonrpc - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" -) - -// UnmarshalTypedError provides unmarshaling errors API response errors -// for both typed and untyped errors. -type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error -} - -// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the -// set of exception names to the error unmarshalers -func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { - return &UnmarshalTypedError{ - exceptions: exceptions, - } -} - -// UnmarshalError attempts to unmarshal the HTTP response error as a known -// error type. If unable to unmarshal the error type, the generic SDK error -// type will be used. -func (u *UnmarshalTypedError) UnmarshalError( - resp *http.Response, - respMeta protocol.ResponseMetadata, -) (error, error) { - - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - body := ioutil.NopCloser(&buf) - - // Code may be separated by hash(#), with the last element being the code - // used by the SDK. - codeParts := strings.SplitN(jsonErr.Code, "#", 2) - code := codeParts[len(codeParts)-1] - msg := jsonErr.Message - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. 
- v := fn(respMeta) - err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) - if err != nil { - return nil, err - } - - return v, nil - } - - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil -} - -// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc -// protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{ - Name: "awssdk.jsonrpc.UnmarshalError", - Fn: UnmarshalError, -} - -// UnmarshalError unmarshals an error response for a JSON RPC service. -func UnmarshalError(req *request.Request) { - defer req.HTTPResponse.Body.Close() - - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - req.HTTPResponse.StatusCode, - req.RequestID, - ) - return - } - - codes := strings.SplitN(jsonErr.Code, "#", 2) - req.Error = awserr.NewRequestFailure( - awserr.New(codes[len(codes)-1], jsonErr.Message, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"__type"` - Message string `json:"message"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go deleted file mode 100644 index 776d11018..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go +++ /dev/null @@ -1,76 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go/aws" -) - -// EscapeMode is the mode that should be use for escaping a value -type EscapeMode uint - -// The modes for escaping a value before it is marshaled, and unmarshaled. -const ( - NoEscape EscapeMode = iota - Base64Escape - QuotedEscape -) - -// EncodeJSONValue marshals the value into a JSON string, and optionally base64 -// encodes the string before returning it. -// -// Will panic if the escape mode is unknown. -func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { - b, err := json.Marshal(v) - if err != nil { - return "", err - } - - switch escape { - case NoEscape: - return string(b), nil - case Base64Escape: - return base64.StdEncoding.EncodeToString(b), nil - case QuotedEscape: - return strconv.Quote(string(b)), nil - } - - panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) -} - -// DecodeJSONValue will attempt to decode the string input as a JSONValue. -// Optionally decoding base64 the value first before JSON unmarshaling. -// -// Will panic if the escape mode is unknown. 
-func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { - var b []byte - var err error - - switch escape { - case NoEscape: - b = []byte(v) - case Base64Escape: - b, err = base64.StdEncoding.DecodeString(v) - case QuotedEscape: - var u string - u, err = strconv.Unquote(v) - b = []byte(u) - default: - panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) - } - - if err != nil { - return nil, err - } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go deleted file mode 100644 index 0ea0647a5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ /dev/null @@ -1,81 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// PayloadUnmarshaler provides the interface for unmarshaling a payload's -// reader into a SDK shape. -type PayloadUnmarshaler interface { - UnmarshalPayload(io.Reader, interface{}) error -} - -// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a -// HandlerList. This provides the support for unmarshaling a payload reader to -// a shape without needing a SDK request first. -type HandlerPayloadUnmarshal struct { - Unmarshalers request.HandlerList -} - -// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using -// the Unmarshalers HandlerList provided. Returns an error if unable -// unmarshaling fails. -func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { - req := &request.Request{ - HTTPRequest: &http.Request{}, - HTTPResponse: &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: ioutil.NopCloser(r), - }, - Data: v, - } - - h.Unmarshalers.Run(req) - - return req.Error -} - -// PayloadMarshaler provides the interface for marshaling a SDK shape into and -// io.Writer. -type PayloadMarshaler interface { - MarshalPayload(io.Writer, interface{}) error -} - -// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. -// This provides support for marshaling a SDK shape into an io.Writer without -// needing a SDK request first. -type HandlerPayloadMarshal struct { - Marshalers request.HandlerList -} - -// MarshalPayload marshals the SDK shape into the io.Writer using the -// Marshalers HandlerList provided. Returns an error if unable if marshal -// fails. 
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { - req := request.New( - aws.Config{}, - metadata.ClientInfo{}, - request.Handlers{}, - nil, - &request.Operation{HTTPMethod: "PUT"}, - v, - nil, - ) - - h.Marshalers.Run(req) - - if req.Error != nil { - return req.Error - } - - io.Copy(w, req.GetBody()) - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go deleted file mode 100644 index 9d521dcb9..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go +++ /dev/null @@ -1,49 +0,0 @@ -package protocol - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RequireHTTPMinProtocol request handler is used to enforce that -// the target endpoint supports the given major and minor HTTP protocol version. -type RequireHTTPMinProtocol struct { - Major, Minor int -} - -// Handler will mark the request.Request with an error if the -// target endpoint did not connect with the required HTTP protocol -// major and minor version. -func (p RequireHTTPMinProtocol) Handler(r *request.Request) { - if r.Error != nil || r.HTTPResponse == nil { - return - } - - if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } - - if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { - r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) - } -} - -// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint -// did not match the required HTTP major and minor protocol version. -const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" - -func newMinHTTPProtoError(major, minor int, r *request.Request) error { - return awserr.NewRequestFailure( - awserr.New("MinimumHTTPProtocolError", - fmt.Sprintf( - "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - major, minor, r.HTTPResponse.Proto, - ), - nil, - ), - r.HTTPResponse.StatusCode, r.RequestID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index d40346a77..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialization of AWS query requests, and responses. -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. 
-func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) - return - } - - if !r.IsPresigned() { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 75866d012..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,246 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. -func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." 
+ name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - if _, ok := value.Interface().([]byte); ok { - return q.parseScalar(v, value, prefix, tag) - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - if listName := tag.Get("locationNameList"); listName == "" { - prefix += ".member" - } else { - prefix += "." + listName - } - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. - mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - v.Set(name, protocol.FormatTime(format, value)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index 9231e95d1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,39 +0,0 @@ -package query - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. 
-func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index 831b0110c..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "encoding/xml" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -type xmlErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlResponseError struct { - xmlErrorResponse -} - -func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - const svcUnavailableTagName = "ServiceUnavailableException" - const errorResponseTagName = "ErrorResponse" - - switch start.Name.Local { - case svcUnavailableTagName: - e.Code = svcUnavailableTagName - e.Message = "service is unavailable" - return d.Skip() - - case errorResponseTagName: - return d.DecodeElement(&e.xmlErrorResponse, &start) - - default: - return fmt.Errorf("unknown error response tag, %v", start) - } -} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var respErr xmlResponseError - err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - reqID := respErr.RequestID - if len(reqID) == 0 { - reqID = r.RequestID - } - - r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index fb35fee5f..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -var byteSliceType = reflect.TypeOf([]byte{}) - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, false) - buildBody(r, v) - } -} - -// BuildAsGET builds the REST component of a service request with the ability to hoist -// data from the body. -func BuildAsGET(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v, true) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { - query := r.HTTPRequest.URL.Query() - - // Setup the raw path to match the base path pattern. This is needed - // so that when the path is mutated a custom escaped version can be - // stored in RawPath that will be used by the Go client. - r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if kind := m.Kind(); kind == reflect.Ptr { - m = m.Elem() - } else if kind == reflect.Interface { - if !m.Elem().IsValid() { - continue - } - } - if !m.IsValid() { - continue - } - if field.Tag.Get("ignore") != "" { - continue - } - - // Support the ability to customize values to be marshaled as a - // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as string but - // required to be base64 encoded in request. 
- if field.Tag.Get("marshal-as") == "blob" { - m = m.Convert(byteSliceType) - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) - case "querystring": - err = buildQueryString(query, m, name, field.Tag) - default: - if buildGETQuery { - err = buildQueryString(query, m, name, field.Tag) - } - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { - cleanPath(r.HTTPRequest.URL) - } -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New(request.ErrCodeSerialization, - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - name = strings.TrimSpace(name) - str = strings.TrimSpace(str) - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { - prefix := tag.Get("locationName") - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key), tag) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - - } - keyStr := strings.TrimSpace(key.String()) - str = strings.TrimSpace(str) - - header.Add(prefix+keyStr, str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { - value, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - - u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) - u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) - - u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) - u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v, tag) - if err == errValueNotSet { - return nil - } else 
if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func cleanPath(u *url.URL) { - hasSlash := strings.HasSuffix(u.Path, "/") - - // clean up path, removing duplicate `/` - u.Path = path.Clean(u.Path) - u.RawPath = path.Clean(u.RawPath) - - if hasSlash && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - u.RawPath += "/" - } -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) - if err != nil { - return "", fmt.Errorf("unable to encode JSONValue, %v", err) - } - default: - err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index b54c99eda..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,54 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -const nopayloadPayloadType = "nopayload" - -// PayloadType returns the type of a payload field member of i if there is one, -// or "". 
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - - if field, ok := v.Type().FieldByName("_"); ok { - if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { - return nopayloadPayloadType - } - - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - - return "" -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 92f8b4d9a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,257 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - awsStrings "github.com/aws/aws-sdk-go/internal/strings" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - if err := unmarshalBody(r, v); err != nil { - r.Error = err - } - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { - r.Error = err - } - } -} - -// UnmarshalResponse attempts to unmarshal the REST response headers to -// the data type passed in. The type must be a pointer. An error is returned -// with any error unmarshaling the response into the target datatype. 
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { - v := reflect.Indirect(reflect.ValueOf(data)) - return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) -} - -func unmarshalBody(r *request.Request, v reflect.Value) error { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - payload.Set(reflect.ValueOf(b)) - - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - str := string(b) - payload.Set(reflect.ValueOf(&str)) - - default: - switch payload.Type().String() { - case "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - - case "io.ReadSeeker": - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to read response body", err) - } - payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) - - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() - return awserr.New(request.ErrCodeSerialization, - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } - - return nil -} - -func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, resp.StatusCode) - - case "header": - err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) - if err != nil { - return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) - if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } - } - } - } - - return nil -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { - if len(headers) == 0 { - return nil - } - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - if awsStrings.HasPrefixFold(k, prefix) { - if normalize == true { - k = strings.ToLower(k) - } else { - k = http.CanonicalHeaderKey(k) - } - out[k[len(prefix):]] = &v[0] - } - } - if len(out) != 0 { - r.Set(reflect.ValueOf(out)) - } - - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { - switch tag.Get("type") { - case "jsonvalue": - if len(header) == 0 { - return nil - } - case "blob": - if len(header) == 0 { - return nil - } - default: - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - } - t, err := protocol.ParseTime(format, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - m, err := protocol.DecodeJSONValue(header, escaping) - if err != nil { - return err - } - v.Set(reflect.ValueOf(m)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go deleted file mode 100644 index 2e0e205af..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package restjson provides RESTful JSON serialization of AWS -// requests and responses. -package restjson - -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go -//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go - -import ( - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -// BuildHandler is a named request handler for building restjson protocol -// requests -var BuildHandler = request.NamedHandler{ - Name: "awssdk.restjson.Build", - Fn: Build, -} - -// UnmarshalHandler is a named request handler for unmarshaling restjson -// protocol requests -var UnmarshalHandler = request.NamedHandler{ - Name: "awssdk.restjson.Unmarshal", - Fn: Unmarshal, -} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restjson -// protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{ - Name: "awssdk.restjson.UnmarshalMeta", - Fn: UnmarshalMeta, -} - -// Build builds a request for the REST JSON protocol. -func Build(r *request.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { - r.HTTPRequest.Header.Set("Content-Type", "application/json") - } - jsonrpc.Build(r) - } -} - -// Unmarshal unmarshals a response body for the REST JSON protocol. 
-func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - jsonrpc.Unmarshal(r) - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST JSON protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go deleted file mode 100644 index d756d8cc5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ /dev/null @@ -1,134 +0,0 @@ -package restjson - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - errorTypeHeader = "X-Amzn-Errortype" - errorMessageHeader = "X-Amzn-Errormessage" -) - -// UnmarshalTypedError provides unmarshaling errors API response errors -// for both typed and untyped errors. -type UnmarshalTypedError struct { - exceptions map[string]func(protocol.ResponseMetadata) error -} - -// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the -// set of exception names to the error unmarshalers -func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { - return &UnmarshalTypedError{ - exceptions: exceptions, - } -} - -// UnmarshalError attempts to unmarshal the HTTP response error as a known -// error type. If unable to unmarshal the error type, the generic SDK error -// type will be used. -func (u *UnmarshalTypedError) UnmarshalError( - resp *http.Response, - respMeta protocol.ResponseMetadata, -) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get code from HTTP headers have to parse JSON message - // to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message - } - - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } - - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } - - return v, nil - } - - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil -} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restjson -// protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{ - Name: "awssdk.restjson.UnmarshalError", - Fn: UnmarshalError, -} - -// UnmarshalError unmarshals a response error for the REST JSON protocol. 
-func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] - r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"code"` - Message string `json:"message"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go deleted file mode 100644 index d9a4e7649..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ /dev/null @@ -1,134 +0,0 @@ -package protocol - -import ( - "bytes" - "fmt" - "math" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/internal/sdkmath" -) - -// Names of time formats supported by the SDK -const ( - RFC822TimeFormatName = "rfc822" - ISO8601TimeFormatName = "iso8601" - UnixTimeFormatName = "unixTimestamp" -) - -// Time formats supported by the SDK -// Output time is intended to not contain decimals -const ( - // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" - rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" - rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" - - // This format is used for output time without seconds precision - RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - - // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999" - - // This format is used for output time with fractional second precision up to milliseconds - ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" -) - -// IsKnownTimestampFormat returns if the timestamp format name -// is know to the SDK's protocols. -func IsKnownTimestampFormat(name string) bool { - switch name { - case RFC822TimeFormatName: - fallthrough - case ISO8601TimeFormatName: - fallthrough - case UnixTimeFormatName: - return true - default: - return false - } -} - -// FormatTime returns a string value of the time. -func FormatTime(name string, t time.Time) string { - t = t.UTC().Truncate(time.Millisecond) - - switch name { - case RFC822TimeFormatName: - return t.Format(RFC822OutputTimeFormat) - case ISO8601TimeFormatName: - return t.Format(ISO8601OutputTimeFormat) - case UnixTimeFormatName: - ms := t.UnixNano() / int64(time.Millisecond) - return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) - default: - panic("unknown timestamp format name, " + name) - } -} - -// ParseTime attempts to parse the time given the format. Returns -// the time if it was able to be parsed, and fails otherwise. 
-func ParseTime(formatName, value string) (time.Time, error) { - switch formatName { - case RFC822TimeFormatName: // Smithy HTTPDate format - return tryParse(value, - RFC822TimeFormat, - rfc822TimeFormatSingleDigitDay, - rfc822TimeFormatSingleDigitDayTwoDigitYear, - time.RFC850, - time.ANSIC, - ) - case ISO8601TimeFormatName: // Smithy DateTime format - return tryParse(value, - ISO8601TimeFormat, - iso8601TimeFormatNoZ, - time.RFC3339Nano, - time.RFC3339, - ) - case UnixTimeFormatName: - v, err := strconv.ParseFloat(value, 64) - _, dec := math.Modf(v) - dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 - if err != nil { - return time.Time{}, err - } - return time.Unix(int64(v), int64(dec*(1e9))), nil - default: - panic("unknown timestamp format name, " + formatName) - } -} - -func tryParse(v string, formats ...string) (time.Time, error) { - var errs parseErrors - for _, f := range formats { - t, err := time.Parse(f, v) - if err != nil { - errs = append(errs, parseError{ - Format: f, - Err: err, - }) - continue - } - return t, nil - } - - return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs) -} - -type parseErrors []parseError - -func (es parseErrors) Error() string { - var s bytes.Buffer - for _, e := range es { - fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) - } - - return "parse errors:" + s.String() -} - -type parseError struct { - Format string - Err error -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index f614ef898..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,27 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} - -// ResponseMetadata provides the SDK response metadata attributes. -type ResponseMetadata struct { - StatusCode int - RequestID string -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go deleted file mode 100644 index cc857f136..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go +++ /dev/null @@ -1,65 +0,0 @@ -package protocol - -import ( - "net/http" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalErrorHandler provides unmarshaling errors API response errors for -// both typed and untyped errors. -type UnmarshalErrorHandler struct { - unmarshaler ErrorUnmarshaler -} - -// ErrorUnmarshaler is an abstract interface for concrete implementations to -// unmarshal protocol specific response errors. 
-type ErrorUnmarshaler interface { - UnmarshalError(*http.Response, ResponseMetadata) (error, error) -} - -// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler -// initialized for the set of exception names to the error unmarshalers -func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { - return &UnmarshalErrorHandler{ - unmarshaler: unmarshaler, - } -} - -// UnmarshalErrorHandlerName is the name of the named handler. -const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" - -// NamedHandler returns a NamedHandler for the unmarshaler using the set of -// errors the unmarshaler was initialized for. -func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { - return request.NamedHandler{ - Name: UnmarshalErrorHandlerName, - Fn: u.UnmarshalError, - } -} - -// UnmarshalError will attempt to unmarshal the API response's error message -// into either a generic SDK error type, or a typed error corresponding to the -// errors exception name. -func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - respMeta := ResponseMetadata{ - StatusCode: r.HTTPResponse.StatusCode, - RequestID: r.RequestID, - } - - v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) - if err != nil { - r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), - respMeta.StatusCode, - respMeta.RequestID, - ) - return - } - - r.Error = v -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index 2fbb93ae7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,317 +0,0 @@ -// Package xmlutil provides XML serialization of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. Error will be returned -// if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - return buildXML(params, e, false) -} - -func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, sorted) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. 
-func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested -// types are converted to XMLNodes also. -func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - var payloadFields, nonPayloadFields int - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("ignore") != "" { - continue - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - nonPayloadFields++ - continue - } - payloadFields++ - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - } - - // Only case where the child shape is not added is if the shape only contains - // non-payload fields, e.g headers/query. - if !(payloadFields == 0 && nonPayloadFields > 0) { - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. 
-func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. -// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. 
-func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - str = protocol.FormatTime(format, converted) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else if len(xname.Local) == 0 { - current.Text = str - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go deleted file mode 100644 index c1a511851..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go +++ /dev/null @@ -1,32 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "strings" -) - -type xmlAttrSlice []xml.Attr - -func (x xmlAttrSlice) Len() int { - return len(x) -} - -func (x xmlAttrSlice) Less(i, j int) bool { - spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space - localI, localJ := x[i].Name.Local, x[j].Name.Local - valueI, valueJ := x[i].Value, x[j].Value - - spaceCmp := strings.Compare(spaceI, spaceJ) - localCmp := strings.Compare(localI, localJ) - valueCmp := strings.Compare(valueI, valueJ) - - if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { - return true - } - - return false -} - -func (x xmlAttrSlice) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 107c053f8..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,299 +0,0 @@ -package xmlutil - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/private/protocol" -) - -// UnmarshalXMLError unmarshals the XML error from the stream into the value -// type specified. The value must be a pointer. If the message fails to -// unmarshal, the message content will be included in the returned error as a -// awserr.UnmarshalError. -func UnmarshalXMLError(v interface{}, stream io.Reader) error { - var errBuf bytes.Buffer - body := io.TeeReader(stream, &errBuf) - - err := xml.NewDecoder(body).Decode(v) - if err != nil && err != io.EOF { - return awserr.NewUnmarshalError(err, - "failed to unmarshal error message", errBuf.Bytes()) - } - - return nil -} - -// UnmarshalXML deserializes an xml.Decoder into the container v. 
V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, err := XMLToStruct(d, nil) - if err != nil { - return err - } - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err = parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - xml := tag.Get("xml") - if len(xml) != 0 { - name := strings.SplitAfterN(xml, ",", 2)[0] - if name == "-" { - return nil - } - } - - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := r.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := r.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. -func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - if val, ok := node.findElem(name); ok { - elems = []*XMLNode{{Text: val}} - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. 
-func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. -// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. 
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.ISO8601TimeFormatName - } - - t, err := protocol.ParseTime(format, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index c85b79fdd..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,173 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "fmt" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` - - namespaces map[string]string - parent *XMLNode -} - -// textEncoder is a string type alias that implemnts the TextMarshaler interface. -// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped. -type textEncoder string - -func (t textEncoder) MarshalText() ([]byte, error) { - return []byte(t), nil -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - child.parent = n - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
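XMLToStruct, whose body follows, is the decoding counterpart of the builder above: it walks an xml.Decoder token stream and builds a generic node tree that the parse* helpers then map onto tagged structs. A pared-down sketch of the same idea with a simplified node type (attributes and namespace tracking omitted); the KeyMetadata snippet is an invented example document:

```
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// node is a pared-down analogue of the removed XMLNode: element name,
// character data, and children grouped by local name.
type node struct {
	Name     string
	Text     string
	Children map[string][]*node
}

// decode recursively consumes tokens until the matching end element,
// mirroring the overall shape of the removed XMLToStruct helper.
func decode(d *xml.Decoder, start *xml.StartElement) (*node, error) {
	n := &node{Children: map[string][]*node{}}
	if start != nil {
		n.Name = start.Name.Local
	}
	for {
		tok, err := d.Token()
		if err == io.EOF {
			return n, nil
		}
		if err != nil {
			return nil, err
		}
		switch t := tok.(type) {
		case xml.CharData:
			n.Text += strings.TrimSpace(string(t))
		case xml.StartElement:
			el := t.Copy()
			child, err := decode(d, &el)
			if err != nil {
				return nil, err
			}
			n.Children[el.Name.Local] = append(n.Children[el.Name.Local], child)
		case xml.EndElement:
			return n, nil
		}
	}
}

func main() {
	doc := `<KeyMetadata><KeyId>1234abcd</KeyId><Enabled>true</Enabled></KeyMetadata>`
	root, err := decode(xml.NewDecoder(strings.NewReader(doc)), nil)
	if err != nil {
		panic(err)
	}
	meta := root.Children["KeyMetadata"][0]
	fmt.Println(meta.Children["KeyId"][0].Text, meta.Children["Enabled"][0].Text)
}
```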
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } else { - return out, err - } - } - - if tok == nil { - break - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - out.findNamespaces() - if e != nil { - return out, e - } - node.Name = typed.Name - node.findNamespaces() - tempOut := *out - // Save into a temp variable, simply because out gets squashed during - // loop iterations - node.parent = &tempOut - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - out = &XMLNode{} - } - } - return out, nil -} - -func (n *XMLNode) findNamespaces() { - ns := map[string]string{} - for _, a := range n.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - } - } - - n.namespaces = ns -} - -func (n *XMLNode) findElem(name string) (string, bool) { - for node := n; node != nil; node = node.parent { - for _, a := range node.Attr { - namespace := a.Name.Space - if v, ok := node.namespaces[namespace]; ok { - namespace = v - } - if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { - return a.Value, true - } - } - } - return "", false -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. -func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - // Sort Attributes - attrs := node.Attr - if sorted { - sortedAttrs := make([]xml.Attr, len(attrs)) - for _, k := range node.Attr { - sortedAttrs = append(sortedAttrs, k) - } - sort.Sort(xmlAttrSlice(sortedAttrs)) - attrs = sortedAttrs - } - - startElement := xml.StartElement{Name: node.Name, Attr: attrs} - - if node.Text != "" { - e.EncodeElement(textEncoder(node.Text), startElement) - return e.Flush() - } - - e.EncodeToken(startElement) - - if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(startElement.End()) - - return e.Flush() -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go deleted file mode 100644 index a55357d29..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ /dev/null @@ -1,18095 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package kms - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opCancelKeyDeletion = "CancelKeyDeletion" - -// CancelKeyDeletionRequest generates a "aws/request.Request" representing the -// client's request for the CancelKeyDeletion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CancelKeyDeletion for more information on using the CancelKeyDeletion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CancelKeyDeletionRequest method. -// req, resp := client.CancelKeyDeletionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion -func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *request.Request, output *CancelKeyDeletionOutput) { - op := &request.Operation{ - Name: opCancelKeyDeletion, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CancelKeyDeletionInput{} - } - - output = &CancelKeyDeletionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CancelKeyDeletion API operation for AWS Key Management Service. -// -// Cancels the deletion of a KMS key. When this operation succeeds, the key -// state of the KMS key is Disabled. To enable the KMS key, use EnableKey. -// -// For more information about scheduling and canceling deletion of a KMS key, -// see Deleting KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:CancelKeyDeletion (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: ScheduleKeyDeletion -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CancelKeyDeletion for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion -func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { - req, out := c.CancelKeyDeletionRequest(input) - return out, req.Send() -} - -// CancelKeyDeletionWithContext is the same as CancelKeyDeletion with the addition of -// the ability to pass a context and additional request options. -// -// See CancelKeyDeletion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CancelKeyDeletionWithContext(ctx aws.Context, input *CancelKeyDeletionInput, opts ...request.Option) (*CancelKeyDeletionOutput, error) { - req, out := c.CancelKeyDeletionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opConnectCustomKeyStore = "ConnectCustomKeyStore" - -// ConnectCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the ConnectCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ConnectCustomKeyStore for more information on using the ConnectCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ConnectCustomKeyStoreRequest method. -// req, resp := client.ConnectCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore -func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (req *request.Request, output *ConnectCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opConnectCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ConnectCustomKeyStoreInput{} - } - - output = &ConnectCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// ConnectCustomKeyStore API operation for AWS Key Management Service. -// -// Connects or reconnects a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// to its associated CloudHSM cluster. -// -// The custom key store must be connected before you can create KMS keys in -// the key store or use the KMS keys it contains. You can disconnect and reconnect -// a custom key store at any time. -// -// To connect a custom key store, its associated CloudHSM cluster must have -// at least one active HSM. To get the number of active HSMs in a cluster, use -// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. 
To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// (CU) must not be logged into the cluster. This prevents KMS from using this -// account to log in. -// -// The connection process can take an extended amount of time to complete; up -// to 20 minutes. This operation starts the connection process, but it does -// not wait for it to complete. When it succeeds, this operation quickly returns -// an HTTP 200 response and a JSON object with no properties. However, this -// response does not indicate that the custom key store is connected. To get -// the connection state of the custom key store, use the DescribeCustomKeyStores -// operation. -// -// During the connection process, KMS finds the CloudHSM cluster that is associated -// with the custom key store, creates the connection infrastructure, connects -// to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates -// its password. -// -// The ConnectCustomKeyStore operation might fail for various reasons. To find -// the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode -// in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. -// -// To fix the failure, use the DisconnectCustomKeyStore operation to disconnect -// the custom key store, correct the error, use the UpdateCustomKeyStore operation -// if necessary, and then use ConnectCustomKeyStore again. -// -// If you are having trouble connecting or disconnecting a custom key store, -// see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:ConnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ConnectCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CloudHsmClusterNotActiveException -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. 
These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore -func (c *KMS) ConnectCustomKeyStore(input *ConnectCustomKeyStoreInput) (*ConnectCustomKeyStoreOutput, error) { - req, out := c.ConnectCustomKeyStoreRequest(input) - return out, req.Send() -} - -// ConnectCustomKeyStoreWithContext is the same as ConnectCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See ConnectCustomKeyStore for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ConnectCustomKeyStoreWithContext(ctx aws.Context, input *ConnectCustomKeyStoreInput, opts ...request.Option) (*ConnectCustomKeyStoreOutput, error) { - req, out := c.ConnectCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateAlias = "CreateAlias" - -// CreateAliasRequest generates a "aws/request.Request" representing the -// client's request for the CreateAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateAlias for more information on using the CreateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateAliasRequest method. -// req, resp := client.CreateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias -func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { - op := &request.Operation{ - Name: opCreateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateAliasInput{} - } - - output = &CreateAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateAlias API operation for AWS Key Management Service. -// -// Creates a friendly name for a KMS key. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// You can use an alias to identify a KMS key in the KMS console, in the DescribeKey -// operation and in cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), -// such as Encrypt and GenerateDataKey. You can also change the KMS key that's -// associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) -// at any time. These operations don't affect the underlying KMS key. -// -// You can associate the alias with any customer managed key in the same Amazon -// Web Services Region. Each alias is associated with only one KMS key at a -// time, but a KMS key can have multiple aliases. A valid KMS key is required. -// You can't create an alias without a KMS key. -// -// The alias must be unique in the account and Region, but you can have aliases -// with the same name in different Regions. For detailed information about aliases, -// see Using aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) -// in the Key Management Service Developer Guide. -// -// This operation does not return a response. 
To get the alias that you created, -// use the ListAliases operation. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on an alias in a -// different Amazon Web Services account. -// -// Required permissions -// -// * kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * DeleteAlias -// -// * ListAliases -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * AlreadyExistsException -// The request was rejected because it attempted to create a resource that already -// exists. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidAliasNameException -// The request was rejected because the specified alias name is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias -func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - return out, req.Send() -} - -// CreateAliasWithContext is the same as CreateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See CreateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
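Every operation in this removed kms/api.go follows the same generated pattern documented above: a XxxRequest constructor plus Xxx and XxxWithContext wrappers that call Send. For orientation, a rough sketch of how a caller of this v1 client would invoke one of these wrappers; the region, alias name, and key ID below are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// v1 SDK pattern: build a session, construct the generated client, then
	// call the operation wrappers shown in the surrounding diff.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
	if err != nil {
		log.Fatal(err)
	}
	svc := kms.New(sess)

	// Placeholder alias name and key ID; any CreateAliasInput would do here.
	_, err = svc.CreateAliasWithContext(context.Background(), &kms.CreateAliasInput{
		AliasName:   aws.String("alias/example"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("alias created")
}
```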
-func (c *KMS) CreateAliasWithContext(ctx aws.Context, input *CreateAliasInput, opts ...request.Option) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateCustomKeyStore = "CreateCustomKeyStore" - -// CreateCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the CreateCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateCustomKeyStore for more information on using the CreateCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateCustomKeyStoreRequest method. -// req, resp := client.CreateCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore -func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req *request.Request, output *CreateCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opCreateCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateCustomKeyStoreInput{} - } - - output = &CreateCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateCustomKeyStore API operation for AWS Key Management Service. -// -// Creates a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// that is associated with an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) -// that you own and manage. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Before you create the custom key store, you must assemble the required elements, -// including an CloudHSM cluster that fulfills the requirements for a custom -// key store. For details about the required elements, see Assemble the Prerequisites -// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. -// -// When the operation completes successfully, it returns the ID of the new custom -// key store. Before you can use your new custom key store, you need to use -// the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM -// cluster. Even if you are not going to use your custom key store immediately, -// you might want to connect it to verify that all settings are correct and -// then disconnect it until you are ready to use it. -// -// For help with failures, see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. 
You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:CreateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CloudHsmClusterInUseException -// The request was rejected because the specified CloudHSM cluster is already -// associated with a custom key store or it shares a backup history with a cluster -// that is associated with a custom key store. Each custom key store must be -// associated with a different CloudHSM cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -// -// * CustomKeyStoreNameInUseException -// The request was rejected because the specified custom key store name is already -// assigned to another custom key store in the account. Try again with a custom -// key store name that is unique in the account. -// -// * CloudHsmClusterNotFoundException -// The request was rejected because KMS cannot find the CloudHSM cluster with -// the specified cluster ID. Retry the request with a different cluster ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CloudHsmClusterNotActiveException -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -// -// * IncorrectTrustAnchorException -// The request was rejected because the trust anchor certificate in the request -// is not the trust anchor certificate for the specified CloudHSM cluster. -// -// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), -// you create the trust anchor certificate and save it in the customerCA.crt -// file. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. 
-// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore -func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) { - req, out := c.CreateCustomKeyStoreRequest(input) - return out, req.Send() -} - -// CreateCustomKeyStoreWithContext is the same as CreateCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See CreateCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateCustomKeyStoreWithContext(ctx aws.Context, input *CreateCustomKeyStoreInput, opts ...request.Option) (*CreateCustomKeyStoreOutput, error) { - req, out := c.CreateCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGrant = "CreateGrant" - -// CreateGrantRequest generates a "aws/request.Request" representing the -// client's request for the CreateGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateGrant for more information on using the CreateGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateGrantRequest method. 
-// req, resp := client.CreateGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant -func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, output *CreateGrantOutput) { - op := &request.Operation{ - Name: opCreateGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateGrantInput{} - } - - output = &CreateGrantOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateGrant API operation for AWS Key Management Service. -// -// Adds a grant to a KMS key. -// -// A grant is a policy instrument that allows Amazon Web Services principals -// to use KMS keys in cryptographic operations. It also can allow them to view -// a KMS key (DescribeKey) and create and manage grants. When authorizing access -// to a KMS key, grants are considered along with key policies and IAM policies. -// Grants are often used for temporary permissions because you can create one, -// use its permissions, and delete it without changing your key policies or -// IAM policies. -// -// For detailed information about grants, including grant terminology, see Using -// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// The CreateGrant operation returns a GrantToken and a GrantId. -// -// * When you create, retire, or revoke a grant, there might be a brief delay, -// usually less than five minutes, until the grant is available throughout -// KMS. This state is known as eventual consistency. Once the grant has achieved -// eventual consistency, the grantee principal can use the permissions in -// the grant without identifying the grant. However, to use the permissions -// in the grant immediately, use the GrantToken that CreateGrant returns. -// For details, see Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) -// in the Key Management Service Developer Guide . -// -// * The CreateGrant operation also returns a GrantId. You can use the GrantId -// and a key identifier to identify the grant in the RetireGrant and RevokeGrant -// operations. To find the grant ID, use the ListGrants or ListRetirableGrants -// operations. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:CreateGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateGrant for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant -func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { - req, out := c.CreateGrantRequest(input) - return out, req.Send() -} - -// CreateGrantWithContext is the same as CreateGrant with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateGrantWithContext(ctx aws.Context, input *CreateGrantInput, opts ...request.Option) (*CreateGrantOutput, error) { - req, out := c.CreateGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateKey = "CreateKey" - -// CreateKeyRequest generates a "aws/request.Request" representing the -// client's request for the CreateKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateKey for more information on using the CreateKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateKeyRequest method. 
-// req, resp := client.CreateKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey -func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, output *CreateKeyOutput) { - op := &request.Operation{ - Name: opCreateKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateKeyInput{} - } - - output = &CreateKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateKey API operation for AWS Key Management Service. -// -// Creates a unique customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) -// in your Amazon Web Services account and Region. -// -// KMS is replacing the term customer master key (CMK) with KMS key and KMS -// key. The concept has not changed. To prevent breaking changes, KMS is keeping -// some variations of this term. -// -// You can use the CreateKey operation to create symmetric or asymmetric KMS -// keys. -// -// * Symmetric KMS keys contain a 256-bit symmetric key that never leaves -// KMS unencrypted. To use the KMS key, you must call KMS. You can use a -// symmetric KMS key to encrypt and decrypt small amounts of data, but they -// are typically used to generate data keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) -// and data keys pairs (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-key-pairs). -// For details, see GenerateDataKey and GenerateDataKeyPair. -// -// * Asymmetric KMS keys can contain an RSA key pair or an Elliptic Curve -// (ECC) key pair. The private key in an asymmetric KMS key never leaves -// KMS unencrypted. However, you can use the GetPublicKey operation to download -// the public key so it can be used outside of KMS. KMS keys with RSA key -// pairs can be used to encrypt or decrypt data or sign and verify messages -// (but not both). KMS keys with ECC key pairs can be used only to sign and -// verify messages. -// -// For information about symmetric and asymmetric KMS keys, see Using Symmetric -// and Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// To create different types of KMS keys, use the following guidance: -// -// Asymmetric KMS keys -// -// To create an asymmetric KMS key, use the KeySpec parameter to specify the -// type of key material in the KMS key. Then, use the KeyUsage parameter to -// determine whether the KMS key will be used to encrypt and decrypt or sign -// and verify. You can't change these properties after the KMS key is created. -// -// Symmetric KMS keys -// -// When creating a symmetric KMS key, you don't need to specify the KeySpec -// or KeyUsage parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, -// and the default value for KeyUsage, ENCRYPT_DECRYPT, are the only valid values -// for symmetric KMS keys. -// -// Multi-Region primary keys -// -// Imported key material -// -// To create a multi-Region primary key in the local Amazon Web Services Region, -// use the MultiRegion parameter with a value of True. To create a multi-Region -// replica key, that is, a KMS key with the same key ID and key material as -// a primary key, but in a different Amazon Web Services Region, use the ReplicateKey -// operation. 
To change a replica key to a primary key, and its primary key -// to a replica key, use the UpdatePrimaryRegion operation. -// -// This operation supports multi-Region keys, an KMS feature that lets you create -// multiple interoperable KMS keys in different Amazon Web Services Regions. -// Because these KMS keys have the same key ID, key material, and other metadata, -// you can use them interchangeably to encrypt data in one Amazon Web Services -// Region and decrypt it in a different Amazon Web Services Region without re-encrypting -// the data or making a cross-Region call. For more information about multi-Region -// keys, see Using multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. -// -// You can create symmetric and asymmetric multi-Region keys and multi-Region -// keys with imported key material. You cannot create multi-Region keys in a -// custom key store. -// -// To import your own key material, begin by creating a symmetric KMS key with -// no key material. To do this, use the Origin parameter of CreateKey with a -// value of EXTERNAL. Next, use GetParametersForImport operation to get a public -// key and import token, and use the public key to encrypt your key material. -// Then, use ImportKeyMaterial with your import token to import the key material. -// For step-by-step instructions, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide . You cannot import the key -// material into an asymmetric KMS key. -// -// To create a multi-Region primary key with imported key material, use the -// Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion -// parameter with a value of True. To create replicas of the multi-Region primary -// key, use the ReplicateKey operation. For more information about multi-Region -// keys, see Using multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. -// -// Custom key store -// -// To create a symmetric KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), -// use the CustomKeyStoreId parameter to specify the custom key store. You must -// also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM -// cluster that is associated with the custom key store must have at least two -// active HSMs in different Availability Zones in the Amazon Web Services Region. -// -// You cannot create an asymmetric KMS key in a custom key store. For information -// about custom key stores in KMS see Using Custom Key Stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in the Key Management Service Developer Guide . -// -// Cross-account use: No. You cannot use this operation to create a KMS key -// in a different Amazon Web Services account. -// -// Required permissions: kms:CreateKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). To use the Tags parameter, kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). 
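As the comment above notes, a symmetric KMS key needs neither KeySpec nor KeyUsage; the defaults SYMMETRIC_DEFAULT and ENCRYPT_DECRYPT apply. A rough sketch of such a CreateKey call with the v1 client removed here, using an illustrative description and tag (the tag additionally requires kms:TagResource, per the permissions note above):

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Omitting KeySpec and KeyUsage yields a symmetric ENCRYPT_DECRYPT key.
	// Description and tag values are illustrative only.
	out, err := svc.CreateKey(&kms.CreateKeyInput{
		Description: aws.String("example symmetric key"),
		Tags: []*kms.Tag{
			{TagKey: aws.String("project"), TagValue: aws.String("example")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.KeyMetadata.Arn))
}
```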
For examples and information about related permissions, see -// Allow a user to create KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * DescribeKey -// -// * ListKeys -// -// * ScheduleKeyDeletion -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateKey for usage and error information. -// -// Returned Error Types: -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. 
These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey -func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { - req, out := c.CreateKeyRequest(input) - return out, req.Send() -} - -// CreateKeyWithContext is the same as CreateKey with the addition of -// the ability to pass a context and additional request options. -// -// See CreateKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateKeyWithContext(ctx aws.Context, input *CreateKeyInput, opts ...request.Option) (*CreateKeyOutput, error) { - req, out := c.CreateKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecrypt = "Decrypt" - -// DecryptRequest generates a "aws/request.Request" representing the -// client's request for the Decrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Decrypt for more information on using the Decrypt -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecryptRequest method. 
-// req, resp := client.DecryptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt -func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output *DecryptOutput) { - op := &request.Operation{ - Name: opDecrypt, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecryptInput{} - } - - output = &DecryptOutput{} - req = c.newRequest(op, input, output) - return -} - -// Decrypt API operation for AWS Key Management Service. -// -// Decrypts ciphertext that was encrypted by a KMS key using any of the following -// operations: -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyWithoutPlaintext -// -// * GenerateDataKeyPairWithoutPlaintext -// -// You can use this operation to decrypt ciphertext that was encrypted under -// a symmetric or asymmetric KMS key. When the KMS key is asymmetric, you must -// specify the KMS key and the encryption algorithm that was used to encrypt -// the ciphertext. For information about symmetric and asymmetric KMS keys, -// see Using Symmetric and Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// The Decrypt operation also decrypts ciphertext that was encrypted outside -// of KMS by the public key in an KMS asymmetric KMS key. However, it cannot -// decrypt ciphertext produced by other libraries, such as the Amazon Web Services -// Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) -// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). -// These libraries return a ciphertext format that is incompatible with KMS. -// -// If the ciphertext was encrypted under a symmetric KMS key, the KeyId parameter -// is optional. KMS can get this information from metadata that it adds to the -// symmetric ciphertext blob. This feature adds durability to your implementation -// by ensuring that authorized users can decrypt ciphertext decades after it -// was encrypted, even if they've lost track of the key ID. However, specifying -// the KMS key is always recommended as a best practice. When you use the KeyId -// parameter to specify a KMS key, KMS only uses the KMS key you specify. If -// the ciphertext was encrypted under a different KMS key, the Decrypt operation -// fails. This practice ensures that you use the KMS key that you intend. -// -// Whenever possible, use key policies to give users permission to call the -// Decrypt operation on a particular KMS key, instead of using IAM policies. -// Otherwise, you might create an IAM user policy that gives the user Decrypt -// permission on all KMS keys. This user could decrypt ciphertext that was encrypted -// by KMS keys in other accounts if the key policy for the cross-account KMS -// key permits it. If you must use an IAM policy for Decrypt permissions, limit -// the user to particular KMS keys or particular trusted accounts. For details, -// see Best practices for IAM policies (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices) -// in the Key Management Service Developer Guide. 
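The comment above recommends passing KeyId even for symmetric ciphertext so that KMS will only use the key you intend. A minimal sketch of that against the v1 client removed in this patch, using DecryptWithContext (documented just below); the key ARN and ciphertext are placeholders:

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Placeholder: in practice this is the CiphertextBlob returned by a
	// prior Encrypt or GenerateDataKey call.
	ciphertext := []byte("...")

	// Naming the key is optional for symmetric ciphertext but recommended;
	// KMS then refuses to decrypt with any other key.
	out, err := svc.DecryptWithContext(context.Background(), &kms.DecryptInput{
		CiphertextBlob: ciphertext,
		KeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/example"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered %d plaintext bytes\n", len(out.Plaintext))
}
```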
-// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * ReEncrypt -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Decrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidCiphertextException -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * IncorrectKeyException -// The request was rejected because the specified KMS key cannot decrypt the -// data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request -// must identify the same KMS key that was used to encrypt the ciphertext. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt -func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { - req, out := c.DecryptRequest(input) - return out, req.Send() -} - -// DecryptWithContext is the same as Decrypt with the addition of -// the ability to pass a context and additional request options. -// -// See Decrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DecryptWithContext(ctx aws.Context, input *DecryptInput, opts ...request.Option) (*DecryptOutput, error) { - req, out := c.DecryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteAlias = "DeleteAlias" - -// DeleteAliasRequest generates a "aws/request.Request" representing the -// client's request for the DeleteAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteAlias for more information on using the DeleteAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteAliasRequest method. -// req, resp := client.DeleteAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias -func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { - op := &request.Operation{ - Name: opDeleteAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteAliasInput{} - } - - output = &DeleteAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteAlias API operation for AWS Key Management Service. -// -// Deletes the specified alias. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// Because an alias is not a property of a KMS key, you can delete and change -// the aliases of a KMS key without affecting the KMS key. 
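Each of these operations returns awserr.Error for service and SDK failures, and the generated docs above suggest runtime type assertions on its Code and Message methods. A brief sketch of that pattern around DeleteAlias, with a placeholder alias name:

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Deleting an alias does not affect the underlying KMS key.
	_, err := svc.DeleteAlias(&kms.DeleteAliasInput{
		AliasName: aws.String("alias/example-signing-key"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		// Runtime type assertion, as the generated docs describe.
		fmt.Println(aerr.Code(), aerr.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("alias deleted")
}
```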
Also, aliases do -// not appear in the response from the DescribeKey operation. To get the aliases -// of all KMS keys, use the ListAliases operation. -// -// Each KMS key can have multiple aliases. To change the alias of a KMS key, -// use DeleteAlias to delete the current alias and CreateAlias to create a new -// alias. To associate an existing alias with a different KMS key, call UpdateAlias. -// -// Cross-account use: No. You cannot perform this operation on an alias in a -// different Amazon Web Services account. -// -// Required permissions -// -// * kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * ListAliases -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias -func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - return out, req.Send() -} - -// DeleteAliasWithContext is the same as DeleteAlias with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteAliasWithContext(ctx aws.Context, input *DeleteAliasInput, opts ...request.Option) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteCustomKeyStore = "DeleteCustomKeyStore" - -// DeleteCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteCustomKeyStore for more information on using the DeleteCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteCustomKeyStoreRequest method. -// req, resp := client.DeleteCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStore -func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req *request.Request, output *DeleteCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opDeleteCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteCustomKeyStoreInput{} - } - - output = &DeleteCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteCustomKeyStore API operation for AWS Key Management Service. -// -// Deletes a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// This operation does not delete the CloudHSM cluster that is associated with -// the custom key store, or affect any users or keys in the cluster. -// -// The custom key store that you delete cannot contain any KMS KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys). -// Before deleting the key store, verify that you will never need to use any -// of the KMS keys in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. -// When the scheduled waiting period expires, the ScheduleKeyDeletion operation -// deletes the KMS keys. Then it makes a best effort to delete the key material -// from the associated cluster. However, you might need to manually delete the -// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) -// from the cluster and its backups. -// -// After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to -// disconnect the key store from KMS. Then, you can delete the custom key store. -// -// Instead of deleting the custom key store, consider using DisconnectCustomKeyStore -// to disconnect it from KMS. While the key store is disconnected, you cannot -// create or use the KMS keys in the key store. But, you do not need to delete -// KMS keys and you can reconnect a disconnected custom key store at any time. -// -// If the operation succeeds, it returns a JSON object with no properties. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. 
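Per the DeleteCustomKeyStore description above, a custom key store can only be deleted once it holds no KMS keys and has been disconnected. A sketch of that two-step sequence with the v1 client, using a placeholder key store ID:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	storeID := aws.String("cks-1234567890abcdef0") // placeholder custom key store ID

	// Disconnect first; deleting a connected key store is rejected.
	if _, err := svc.DisconnectCustomKeyStore(&kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.DeleteCustomKeyStore(&kms.DeleteCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("custom key store deleted")
}
```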
-// -// Required permissions: kms:DeleteCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreHasCMKsException -// The request was rejected because the custom key store contains KMS keys. -// After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion -// operation to delete the KMS keys. After they are deleted, you can delete -// the custom key store. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStore -func (c *KMS) DeleteCustomKeyStore(input *DeleteCustomKeyStoreInput) (*DeleteCustomKeyStoreOutput, error) { - req, out := c.DeleteCustomKeyStoreRequest(input) - return out, req.Send() -} - -// DeleteCustomKeyStoreWithContext is the same as DeleteCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteCustomKeyStoreWithContext(ctx aws.Context, input *DeleteCustomKeyStoreInput, opts ...request.Option) (*DeleteCustomKeyStoreOutput, error) { - req, out := c.DeleteCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteImportedKeyMaterial = "DeleteImportedKeyMaterial" - -// DeleteImportedKeyMaterialRequest generates a "aws/request.Request" representing the -// client's request for the DeleteImportedKeyMaterial operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteImportedKeyMaterial for more information on using the DeleteImportedKeyMaterial -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteImportedKeyMaterialRequest method. -// req, resp := client.DeleteImportedKeyMaterialRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial -func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialInput) (req *request.Request, output *DeleteImportedKeyMaterialOutput) { - op := &request.Operation{ - Name: opDeleteImportedKeyMaterial, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteImportedKeyMaterialInput{} - } - - output = &DeleteImportedKeyMaterialOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteImportedKeyMaterial API operation for AWS Key Management Service. -// -// Deletes key material that you previously imported. This operation makes the -// specified KMS key unusable. For more information about importing key material -// into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide. -// -// When the specified KMS key is in the PendingDeletion state, this operation -// does not change the KMS key's state. Otherwise, it changes the KMS key's -// state to PendingImport. -// -// After you delete key material, you can use ImportKeyMaterial to reimport -// the same key material into the KMS key. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DeleteImportedKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetParametersForImport -// -// * ImportKeyMaterial -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteImportedKeyMaterial for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. 
-// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial -func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) { - req, out := c.DeleteImportedKeyMaterialRequest(input) - return out, req.Send() -} - -// DeleteImportedKeyMaterialWithContext is the same as DeleteImportedKeyMaterial with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteImportedKeyMaterial for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteImportedKeyMaterialWithContext(ctx aws.Context, input *DeleteImportedKeyMaterialInput, opts ...request.Option) (*DeleteImportedKeyMaterialOutput, error) { - req, out := c.DeleteImportedKeyMaterialRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeCustomKeyStores = "DescribeCustomKeyStores" - -// DescribeCustomKeyStoresRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCustomKeyStores operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeCustomKeyStores for more information on using the DescribeCustomKeyStores -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeCustomKeyStoresRequest method. -// req, resp := client.DescribeCustomKeyStoresRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStores -func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput) (req *request.Request, output *DescribeCustomKeyStoresOutput) { - op := &request.Operation{ - Name: opDescribeCustomKeyStores, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeCustomKeyStoresInput{} - } - - output = &DescribeCustomKeyStoresOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeCustomKeyStores API operation for AWS Key Management Service. 
-// -// Gets information about custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in the account and Region. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// By default, this operation returns information about all custom key stores -// in the account and Region. To get only information about a particular custom -// key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter -// (but not both). -// -// To determine whether the custom key store is connected to its CloudHSM cluster, -// use the ConnectionState element in the response. If an attempt to connect -// the custom key store failed, the ConnectionState value is FAILED and the -// ConnectionErrorCode element in the response indicates the cause of the failure. -// For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. -// -// Custom key stores have a DISCONNECTED connection state if the key store has -// never been connected or you use the DisconnectCustomKeyStore operation to -// disconnect it. If your custom key store state is CONNECTED but you are having -// trouble using it, make sure that its associated CloudHSM cluster is active -// and contains the minimum number of HSMs required for the operation, if any. -// -// For help repairing your custom key store, see the Troubleshooting Custom -// Key Stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// topic in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:DescribeCustomKeyStores (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DescribeCustomKeyStores for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStores -func (c *KMS) DescribeCustomKeyStores(input *DescribeCustomKeyStoresInput) (*DescribeCustomKeyStoresOutput, error) { - req, out := c.DescribeCustomKeyStoresRequest(input) - return out, req.Send() -} - -// DescribeCustomKeyStoresWithContext is the same as DescribeCustomKeyStores with the addition of -// the ability to pass a context and additional request options. 
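The DescribeCustomKeyStores comment above explains how ConnectionState and ConnectionErrorCode report whether a custom key store is usable. A short sketch of listing all stores in the account and Region with the v1 client and surfacing that state:

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// With no filters, every custom key store in the account and Region is returned.
	out, err := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, store := range out.CustomKeyStores {
		state := aws.StringValue(store.ConnectionState)
		fmt.Printf("%s: %s", aws.StringValue(store.CustomKeyStoreName), state)
		if state == "FAILED" {
			// ConnectionErrorCode explains why the last connection attempt failed.
			fmt.Printf(" (%s)", aws.StringValue(store.ConnectionErrorCode))
		}
		fmt.Println()
	}
}
```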
-// -// See DescribeCustomKeyStores for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DescribeCustomKeyStoresWithContext(ctx aws.Context, input *DescribeCustomKeyStoresInput, opts ...request.Option) (*DescribeCustomKeyStoresOutput, error) { - req, out := c.DescribeCustomKeyStoresRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeKey = "DescribeKey" - -// DescribeKeyRequest generates a "aws/request.Request" representing the -// client's request for the DescribeKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeKey for more information on using the DescribeKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeKeyRequest method. -// req, resp := client.DescribeKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey -func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, output *DescribeKeyOutput) { - op := &request.Operation{ - Name: opDescribeKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeKeyInput{} - } - - output = &DescribeKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeKey API operation for AWS Key Management Service. -// -// Provides detailed information about a KMS key. You can run DescribeKey on -// a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) -// or an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). -// -// This detailed information includes the key ARN, creation date (and deletion -// date, if applicable), the key state, and the origin and expiration date (if -// any) of the key material. It includes fields, like KeySpec, that help you -// distinguish symmetric from asymmetric KMS keys. It also provides information -// that is particularly important to asymmetric keys, such as the key usage -// (encryption or signing) and the encryption algorithms or signing algorithms -// that the KMS key supports. For KMS keys in custom key stores, it includes -// information about the custom key store, such as the key store ID and the -// CloudHSM cluster ID. For multi-Region keys, it displays the primary key and -// all related replica keys. -// -// DescribeKey does not return the following information: -// -// * Aliases associated with the KMS key. To get this information, use ListAliases. -// -// * Whether automatic key rotation is enabled on the KMS key. To get this -// information, use GetKeyRotationStatus. Also, some key states prevent a -// KMS key from being automatically rotated. 
For details, see How Automatic -// Key Rotation Works (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) -// in Key Management Service Developer Guide. -// -// * Tags on the KMS key. To get this information, use ListResourceTags. -// -// * Key policies and grants on the KMS key. To get this information, use -// GetKeyPolicy and ListGrants. -// -// If you call the DescribeKey operation on a predefined Amazon Web Services -// alias, that is, an Amazon Web Services alias with no key ID, KMS creates -// an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). -// Then, it associates the alias with the new KMS key, and returns the KeyId -// and Arn of the new KMS key in the response. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:DescribeKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetKeyPolicy -// -// * GetKeyRotationStatus -// -// * ListAliases -// -// * ListGrants -// -// * ListKeys -// -// * ListResourceTags -// -// * ListRetirableGrants -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DescribeKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey -func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { - req, out := c.DescribeKeyRequest(input) - return out, req.Send() -} - -// DescribeKeyWithContext is the same as DescribeKey with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DescribeKeyWithContext(ctx aws.Context, input *DescribeKeyInput, opts ...request.Option) (*DescribeKeyOutput, error) { - req, out := c.DescribeKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisableKey = "DisableKey" - -// DisableKeyRequest generates a "aws/request.Request" representing the -// client's request for the DisableKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
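Rounding out the DescribeKey documentation above, a sketch of fetching key metadata with the v1 client; the alias name is a placeholder, and KeyId also accepts a key ID, key ARN, or alias ARN:

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String("alias/example-signing-key"), // placeholder alias
	})
	if err != nil {
		log.Fatal(err)
	}

	// KeyMetadata carries the ARN, key state, and key usage described above.
	md := out.KeyMetadata
	fmt.Println(aws.StringValue(md.Arn), aws.StringValue(md.KeyState), aws.StringValue(md.KeyUsage))
}
```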
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisableKey for more information on using the DisableKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisableKeyRequest method. -// req, resp := client.DisableKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey -func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, output *DisableKeyOutput) { - op := &request.Operation{ - Name: opDisableKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisableKeyInput{} - } - - output = &DisableKeyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisableKey API operation for AWS Key Management Service. -// -// Sets the state of a KMS key to disabled. This change temporarily prevents -// use of the KMS key for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DisableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: EnableKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisableKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey -func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { - req, out := c.DisableKeyRequest(input) - return out, req.Send() -} - -// DisableKeyWithContext is the same as DisableKey with the addition of -// the ability to pass a context and additional request options. -// -// See DisableKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisableKeyWithContext(ctx aws.Context, input *DisableKeyInput, opts ...request.Option) (*DisableKeyOutput, error) { - req, out := c.DisableKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisableKeyRotation = "DisableKeyRotation" - -// DisableKeyRotationRequest generates a "aws/request.Request" representing the -// client's request for the DisableKeyRotation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisableKeyRotation for more information on using the DisableKeyRotation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisableKeyRotationRequest method. -// req, resp := client.DisableKeyRotationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation -func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *request.Request, output *DisableKeyRotationOutput) { - op := &request.Operation{ - Name: opDisableKeyRotation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisableKeyRotationInput{} - } - - output = &DisableKeyRotationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisableKeyRotation API operation for AWS Key Management Service. -// -// Disables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// for the specified symmetric KMS key. -// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key), -// set the property on the primary key. -// -// The KMS key that you use for this operation must be in a compatible key state. 
-// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DisableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * EnableKeyRotation -// -// * GetKeyRotationStatus -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisableKeyRotation for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation -func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRotationOutput, error) { - req, out := c.DisableKeyRotationRequest(input) - return out, req.Send() -} - -// DisableKeyRotationWithContext is the same as DisableKeyRotation with the addition of -// the ability to pass a context and additional request options. -// -// See DisableKeyRotation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisableKeyRotationWithContext(ctx aws.Context, input *DisableKeyRotationInput, opts ...request.Option) (*DisableKeyRotationOutput, error) { - req, out := c.DisableKeyRotationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisconnectCustomKeyStore = "DisconnectCustomKeyStore" - -// DisconnectCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the DisconnectCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisconnectCustomKeyStore for more information on using the DisconnectCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisconnectCustomKeyStoreRequest method. -// req, resp := client.DisconnectCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStore -func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInput) (req *request.Request, output *DisconnectCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opDisconnectCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisconnectCustomKeyStoreInput{} - } - - output = &DisconnectCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisconnectCustomKeyStore API operation for AWS Key Management Service. -// -// Disconnects the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// from its associated CloudHSM cluster. While a custom key store is disconnected, -// you can manage the custom key store and its KMS keys, but you cannot create -// or use KMS keys in the custom key store. You can reconnect the custom key -// store at any time. -// -// While a custom key store is disconnected, all attempts to create KMS keys -// in the custom key store or to use existing KMS keys in cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) -// will fail. This action can prevent users from storing and accessing sensitive -// data. -// -// To find the connection state of a custom key store, use the DescribeCustomKeyStores -// operation. To reconnect a custom key store, use the ConnectCustomKeyStore -// operation. -// -// If the operation succeeds, it returns a JSON object with no properties. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:DisconnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisconnectCustomKeyStore for usage and error information. 
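The `XxxRequest` constructors documented above exist so callers can customize the request lifecycle before calling `Send`. A sketch of that flow with the old client, using a hypothetical custom key store ID and a purely illustrative header:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Build the request without sending it, so the lifecycle can be
	// customized before Send, as the removed comment above describes.
	req, out := svc.DisconnectCustomKeyStoreRequest(&kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"), // hypothetical store ID
	})
	req.SetContext(context.Background())
	req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // inject a custom header

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	_ = out // DisconnectCustomKeyStore returns an empty JSON object on success
	log.Println("custom key store disconnected")
}
```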
-// -// Returned Error Types: -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStore -func (c *KMS) DisconnectCustomKeyStore(input *DisconnectCustomKeyStoreInput) (*DisconnectCustomKeyStoreOutput, error) { - req, out := c.DisconnectCustomKeyStoreRequest(input) - return out, req.Send() -} - -// DisconnectCustomKeyStoreWithContext is the same as DisconnectCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See DisconnectCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisconnectCustomKeyStoreWithContext(ctx aws.Context, input *DisconnectCustomKeyStoreInput, opts ...request.Option) (*DisconnectCustomKeyStoreOutput, error) { - req, out := c.DisconnectCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEnableKey = "EnableKey" - -// EnableKeyRequest generates a "aws/request.Request" representing the -// client's request for the EnableKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See EnableKey for more information on using the EnableKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the EnableKeyRequest method. 
-// req, resp := client.EnableKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey -func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, output *EnableKeyOutput) { - op := &request.Operation{ - Name: opEnableKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EnableKeyInput{} - } - - output = &EnableKeyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// EnableKey API operation for AWS Key Management Service. -// -// Sets the key state of a KMS key to enabled. This allows you to use the KMS -// key for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:EnableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: DisableKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation EnableKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey -func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { - req, out := c.EnableKeyRequest(input) - return out, req.Send() -} - -// EnableKeyWithContext is the same as EnableKey with the addition of -// the ability to pass a context and additional request options. -// -// See EnableKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EnableKeyWithContext(ctx aws.Context, input *EnableKeyInput, opts ...request.Option) (*EnableKeyOutput, error) { - req, out := c.EnableKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEnableKeyRotation = "EnableKeyRotation" - -// EnableKeyRotationRequest generates a "aws/request.Request" representing the -// client's request for the EnableKeyRotation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See EnableKeyRotation for more information on using the EnableKeyRotation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the EnableKeyRotationRequest method. -// req, resp := client.EnableKeyRotationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation -func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *request.Request, output *EnableKeyRotationOutput) { - op := &request.Operation{ - Name: opEnableKeyRotation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EnableKeyRotationInput{} - } - - output = &EnableKeyRotationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// EnableKeyRotation API operation for AWS Key Management Service. -// -// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// for the specified symmetric KMS key. -// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key), -// set the property on the primary key. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. 
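Rotation management in the v1 client is a pair of one-field calls plus a status query. A hedged sketch; GetKeyRotationStatus is the related operation named above, but its input/output shapes are not shown in this hunk, so treat those field names as assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // hypothetical symmetric KMS key

	// Turn on automatic rotation of the key material for a symmetric KMS key.
	if _, err := svc.EnableKeyRotation(&kms.EnableKeyRotationInput{KeyId: keyID}); err != nil {
		log.Fatal(err)
	}

	// GetKeyRotationStatus reports whether rotation is currently enabled.
	status, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{KeyId: keyID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rotation enabled:", aws.BoolValue(status.KeyRotationEnabled))
}
```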
-// -// Required permissions: kms:EnableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * DisableKeyRotation -// -// * GetKeyRotationStatus -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation EnableKeyRotation for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation -func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotationOutput, error) { - req, out := c.EnableKeyRotationRequest(input) - return out, req.Send() -} - -// EnableKeyRotationWithContext is the same as EnableKeyRotation with the addition of -// the ability to pass a context and additional request options. -// -// See EnableKeyRotation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EnableKeyRotationWithContext(ctx aws.Context, input *EnableKeyRotationInput, opts ...request.Option) (*EnableKeyRotationOutput, error) { - req, out := c.EnableKeyRotationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEncrypt = "Encrypt" - -// EncryptRequest generates a "aws/request.Request" representing the -// client's request for the Encrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Encrypt for more information on using the Encrypt -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the EncryptRequest method. -// req, resp := client.EncryptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt -func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output *EncryptOutput) { - op := &request.Operation{ - Name: opEncrypt, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EncryptInput{} - } - - output = &EncryptOutput{} - req = c.newRequest(op, input, output) - return -} - -// Encrypt API operation for AWS Key Management Service. -// -// Encrypts plaintext into ciphertext by using a KMS key. The Encrypt operation -// has two primary use cases: -// -// * You can encrypt small amounts of arbitrary data, such as a personal -// identifier or database password, or other sensitive information. -// -// * You can use the Encrypt operation to move encrypted data from one Amazon -// Web Services Region to another. For example, in Region A, generate a data -// key and use the plaintext key to encrypt your data. Then, in Region A, -// use the Encrypt operation to encrypt the plaintext data key under a KMS -// key in Region B. Now, you can move the encrypted data and the encrypted -// data key to Region B. When necessary, you can decrypt the encrypted data -// key and the encrypted data entirely within in Region B. -// -// You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey -// and GenerateDataKeyPair operations return a plaintext data key and an encrypted -// copy of that data key. -// -// When you encrypt data, you must specify a symmetric or asymmetric KMS key -// to use in the encryption operation. The KMS key must have a KeyUsage value -// of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// If you use a symmetric KMS key, you can use an encryption context to add -// additional security to your encryption operation. If you specify an EncryptionContext -// when encrypting data, you must specify the same encryption context (a case-sensitive -// exact match) when decrypting the data. Otherwise, the request to decrypt -// fails with an InvalidCiphertextException. For more information, see Encryption -// Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// If you specify an asymmetric KMS key, you must also specify the encryption -// algorithm. The algorithm must be compatible with the KMS key type. -// -// When you use an asymmetric KMS key to encrypt or reencrypt data, be sure -// to record the KMS key and encryption algorithm that you choose. You will -// be required to provide the same KMS key and encryption algorithm when you -// decrypt the data. If the KMS key and algorithm do not match the values used -// to encrypt the data, the decrypt operation fails. -// -// You are not required to supply the key ID and encryption algorithm when you -// decrypt with symmetric KMS keys because KMS stores this information in the -// ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric -// keys. The standard format for asymmetric key ciphertext does not include -// configurable fields. -// -// The maximum size of the data that you can encrypt varies with the type of -// KMS key and the encryption algorithm that you choose. 
-// -// * Symmetric KMS keys SYMMETRIC_DEFAULT: 4096 bytes -// -// * RSA_2048 RSAES_OAEP_SHA_1: 214 bytes RSAES_OAEP_SHA_256: 190 bytes -// -// * RSA_3072 RSAES_OAEP_SHA_1: 342 bytes RSAES_OAEP_SHA_256: 318 bytes -// -// * RSA_4096 RSAES_OAEP_SHA_1: 470 bytes RSAES_OAEP_SHA_256: 446 bytes -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Encrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Encrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt -func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { - req, out := c.EncryptRequest(input) - return out, req.Send() -} - -// EncryptWithContext is the same as Encrypt with the addition of -// the ability to pass a context and additional request options. 
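The Encrypt documentation above stresses that an EncryptionContext supplied at encryption time must be repeated, exactly, on Decrypt. A small round-trip sketch with the v1 client; the key alias is a placeholder and the Decrypt shapes are not part of this hunk, so they are assumptions here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("alias/example") // hypothetical alias of a symmetric KMS key

	// The encryption context is not secret, but the exact same map must be
	// supplied on Decrypt or the call fails with InvalidCiphertextException.
	ec := map[string]*string{"purpose": aws.String("demo")}

	enc, err := svc.Encrypt(&kms.EncryptInput{
		KeyId:             keyID,
		Plaintext:         []byte("small secret, at most 4096 bytes for SYMMETRIC_DEFAULT"),
		EncryptionContext: ec,
	})
	if err != nil {
		log.Fatal(err)
	}

	dec, err := svc.Decrypt(&kms.DecryptInput{
		CiphertextBlob:    enc.CiphertextBlob,
		EncryptionContext: ec,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(dec.Plaintext))
}
```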
-// -// See Encrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EncryptWithContext(ctx aws.Context, input *EncryptInput, opts ...request.Option) (*EncryptOutput, error) { - req, out := c.EncryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKey = "GenerateDataKey" - -// GenerateDataKeyRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKey for more information on using the GenerateDataKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyRequest method. -// req, resp := client.GenerateDataKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey -func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.Request, output *GenerateDataKeyOutput) { - op := &request.Operation{ - Name: opGenerateDataKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyInput{} - } - - output = &GenerateDataKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKey API operation for AWS Key Management Service. -// -// Generates a unique symmetric data key for client-side encryption. This operation -// returns a plaintext copy of the data key and a copy that is encrypted under -// a KMS key that you specify. You can use the plaintext key to encrypt your -// data outside of KMS and store the encrypted data key with the encrypted data. -// -// GenerateDataKey returns a unique data key for each request. The bytes in -// the plaintext key are not related to the caller or the KMS key. -// -// To generate a data key, specify the symmetric KMS key that will be used to -// encrypt the data key. You cannot use an asymmetric KMS key to generate data -// keys. To get the type of your KMS key, use the DescribeKey operation. You -// must also specify the length of the data key. Use either the KeySpec or NumberOfBytes -// parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec -// parameter. -// -// To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. -// To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext -// operation. To get a cryptographically secure random byte string, use GenerateRandom. -// -// You can use the optional encryption context to add additional security to -// the encryption operation. 
If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// How to use your data key -// -// We recommend that you use the following pattern to encrypt data locally in -// your application. You can write your own code or use a client-side encryption -// library, such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), -// the Amazon DynamoDB Encryption Client (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), -// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) -// to do these tasks for you. -// -// To encrypt data outside of KMS: -// -// Use the GenerateDataKey operation to get a data key. -// -// Use the plaintext data key (in the Plaintext field of the response) to encrypt -// your data outside of KMS. Then erase the plaintext data key from memory. -// -// Store the encrypted data key (in the CiphertextBlob field of the response) -// with the encrypted data. -// -// To decrypt data outside of KMS: -// -// Use the Decrypt operation to decrypt the encrypted data key. The operation -// returns a plaintext copy of the data key. -// -// Use the plaintext data key to decrypt data outside of KMS, then erase the -// plaintext data key from memory. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. 
-// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey -func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) { - req, out := c.GenerateDataKeyRequest(input) - return out, req.Send() -} - -// GenerateDataKeyWithContext is the same as GenerateDataKey with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyWithContext(ctx aws.Context, input *GenerateDataKeyInput, opts ...request.Option) (*GenerateDataKeyOutput, error) { - req, out := c.GenerateDataKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKeyPair = "GenerateDataKeyPair" - -// GenerateDataKeyPairRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyPair operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyPair for more information on using the GenerateDataKeyPair -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyPairRequest method. 
-// req, resp := client.GenerateDataKeyPairRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair -func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *request.Request, output *GenerateDataKeyPairOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyPair, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyPairInput{} - } - - output = &GenerateDataKeyPairOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyPair API operation for AWS Key Management Service. -// -// Generates a unique asymmetric data key pair. The GenerateDataKeyPair operation -// returns a plaintext public key, a plaintext private key, and a copy of the -// private key that is encrypted under the symmetric KMS key you specify. You -// can use the data key pair to perform asymmetric cryptography and implement -// digital signatures outside of KMS. -// -// You can use the public key that GenerateDataKeyPair returns to encrypt data -// or verify a signature outside of KMS. Then, store the encrypted private key -// with the data. When you are ready to decrypt data or sign a message, you -// can use the Decrypt operation to decrypt the encrypted private key. -// -// To generate a data key pair, you must specify a symmetric KMS key to encrypt -// the private key in a data key pair. You cannot use an asymmetric KMS key -// or a KMS key in a custom key store. To get the type and origin of your KMS -// key, use the DescribeKey operation. -// -// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data -// key pair. KMS recommends that your use ECC key pairs for signing, and use -// RSA key pairs for either encryption or signing, but not both. However, KMS -// cannot enforce any restrictions on the use of data key pairs outside of KMS. -// -// If you are using the data key pair to encrypt data, or for any operation -// where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext -// operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public -// key and an encrypted private key, but omits the plaintext private key that -// you need only to decrypt ciphertext or sign a message. Later, when you need -// to decrypt the data or sign a message, use the Decrypt operation to decrypt -// the encrypted private key in the data key pair. -// -// GenerateDataKeyPair returns a unique data key pair for each request. The -// bytes in the keys are not related to the caller or the KMS key that is used -// to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, -// as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280). The private -// key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958 (https://tools.ietf.org/html/rfc5958). -// -// You can use the optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. 
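The "How to use your data key" pattern spelled out above for GenerateDataKey is classic envelope encryption: fetch a data key, encrypt locally, keep only the wrapped copy. A sketch under those assumptions (placeholder alias; AES-256-GCM chosen purely for illustration):

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("alias/example") // hypothetical symmetric KMS key

	// 1. Get a 256-bit data key: a plaintext copy plus a copy encrypted under the KMS key.
	dk, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   keyID,
		KeySpec: aws.String("AES_256"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Encrypt locally with the plaintext key, then discard it; store only
	//    dk.CiphertextBlob alongside the ciphertext.
	block, err := aes.NewCipher(dk.Plaintext)
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		log.Fatal(err)
	}
	ciphertext := gcm.Seal(nil, nonce, []byte("data encrypted outside of KMS"), nil)

	// 3. To read it back later, call Decrypt on dk.CiphertextBlob to recover
	//    the plaintext data key, then open the ciphertext locally.
	fmt.Printf("stored %d ciphertext bytes and a %d-byte wrapped key\n",
		len(ciphertext), len(dk.CiphertextBlob))
}
```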
-// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyPair (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyPair for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. 
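GenerateDataKeyPair's output formats are pinned down above (DER-encoded SubjectPublicKeyInfo and PKCS#8), which means the Go standard library can parse them directly. A hedged sketch; the output field names reflect the v1 shapes as I recall them rather than anything shown verbatim in this hunk:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/example"), // hypothetical symmetric KMS key that wraps the private key
		KeyPairSpec: aws.String("ECC_NIST_P256"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The public key is DER-encoded SubjectPublicKeyInfo (RFC 5280), so the
	// standard library can parse it directly.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed public key of type %T\n", pub)

	// out.PrivateKeyPlaintext is PKCS#8 DER (RFC 5958); use it immediately and
	// erase it, keeping only out.PrivateKeyCiphertextBlob for later Decrypt calls.
	_ = out.PrivateKeyCiphertextBlob
}
```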
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair -func (c *KMS) GenerateDataKeyPair(input *GenerateDataKeyPairInput) (*GenerateDataKeyPairOutput, error) { - req, out := c.GenerateDataKeyPairRequest(input) - return out, req.Send() -} - -// GenerateDataKeyPairWithContext is the same as GenerateDataKeyPair with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyPair for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyPairWithContext(ctx aws.Context, input *GenerateDataKeyPairInput, opts ...request.Option) (*GenerateDataKeyPairOutput, error) { - req, out := c.GenerateDataKeyPairRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" - -// GenerateDataKeyPairWithoutPlaintextRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyPairWithoutPlaintext operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyPairWithoutPlaintext for more information on using the GenerateDataKeyPairWithoutPlaintext -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyPairWithoutPlaintextRequest method. -// req, resp := client.GenerateDataKeyPairWithoutPlaintextRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext -func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyPairWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyPairWithoutPlaintextOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyPairWithoutPlaintext, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyPairWithoutPlaintextInput{} - } - - output = &GenerateDataKeyPairWithoutPlaintextOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyPairWithoutPlaintext API operation for AWS Key Management Service. -// -// Generates a unique asymmetric data key pair. The GenerateDataKeyPairWithoutPlaintext -// operation returns a plaintext public key and a copy of the private key that -// is encrypted under the symmetric KMS key you specify. Unlike GenerateDataKeyPair, -// this operation does not return a plaintext private key. -// -// You can use the public key that GenerateDataKeyPairWithoutPlaintext returns -// to encrypt data or verify a signature outside of KMS. Then, store the encrypted -// private key with the data. When you are ready to decrypt data or sign a message, -// you can use the Decrypt operation to decrypt the encrypted private key. 
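GenerateDataKeyPairWithoutPlaintext defers private-key exposure until a Decrypt call actually needs it, as described above. A sketch of that two-phase flow, with the same caveats about placeholder identifiers and assumed field names:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("alias/example") // hypothetical symmetric KMS key

	// Create the pair without ever receiving the plaintext private key.
	pair, err := svc.GenerateDataKeyPairWithoutPlaintext(&kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       keyID,
		KeyPairSpec: aws.String("RSA_2048"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Store pair.PublicKey and pair.PrivateKeyCiphertextBlob with the data.

	// Later, when a signature or decryption is actually needed, recover the
	// private key via Decrypt and parse the PKCS#8 DER it returns.
	dec, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: pair.PrivateKeyCiphertextBlob})
	if err != nil {
		log.Fatal(err)
	}
	priv, err := x509.ParsePKCS8PrivateKey(dec.Plaintext)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered private key of type %T\n", priv)
}
```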
-// -// To generate a data key pair, you must specify a symmetric KMS key to encrypt -// the private key in a data key pair. You cannot use an asymmetric KMS key -// or a KMS key in a custom key store. To get the type and origin of your KMS -// key, use the DescribeKey operation. -// -// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data -// key pair. KMS recommends that your use ECC key pairs for signing, and use -// RSA key pairs for either encryption or signing, but not both. However, KMS -// cannot enforce any restrictions on the use of data key pairs outside of KMS. -// -// GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each -// request. The bytes in the key are not related to the caller or KMS key that -// is used to encrypt the private key. The public key is a DER-encoded X.509 -// SubjectPublicKeyInfo, as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280). -// -// You can use the optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyPairWithoutPlaintext for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). 
-// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext -func (c *KMS) GenerateDataKeyPairWithoutPlaintext(input *GenerateDataKeyPairWithoutPlaintextInput) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) - return out, req.Send() -} - -// GenerateDataKeyPairWithoutPlaintextWithContext is the same as GenerateDataKeyPairWithoutPlaintext with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyPairWithoutPlaintext for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyPairWithoutPlaintextWithContext(ctx aws.Context, input *GenerateDataKeyPairWithoutPlaintextInput, opts ...request.Option) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" - -// GenerateDataKeyWithoutPlaintextRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyWithoutPlaintext operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyWithoutPlaintext for more information on using the GenerateDataKeyWithoutPlaintext -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyWithoutPlaintextRequest method. 
-// req, resp := client.GenerateDataKeyWithoutPlaintextRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext -func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyWithoutPlaintextOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyWithoutPlaintext, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyWithoutPlaintextInput{} - } - - output = &GenerateDataKeyWithoutPlaintextOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyWithoutPlaintext API operation for AWS Key Management Service. -// -// Generates a unique symmetric data key. This operation returns a data key -// that is encrypted under a KMS key that you specify. To request an asymmetric -// data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext -// operations. -// -// GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation -// except that returns only the encrypted copy of the data key. This operation -// is useful for systems that need to encrypt data at some point, but not immediately. -// When you need to encrypt the data, you call the Decrypt operation on the -// encrypted copy of the key. -// -// It's also useful in distributed systems with different levels of trust. For -// example, you might store encrypted data in containers. One component of your -// system creates new containers and stores an encrypted data key with each -// container. Then, a different component puts the data into the containers. -// That component first decrypts the data key, uses the plaintext data key to -// encrypt data, puts the encrypted data into the container, and then destroys -// the plaintext data key. In this system, the component that creates the containers -// never sees the plaintext data key. -// -// GenerateDataKeyWithoutPlaintext returns a unique data key for each request. -// The bytes in the keys are not related to the caller or KMS key that is used -// to encrypt the private key. -// -// To generate a data key, you must specify the symmetric KMS key that is used -// to encrypt the data key. You cannot use an asymmetric KMS key to generate -// a data key. To get the type of your KMS key, use the DescribeKey operation. -// -// If the operation succeeds, you will find the encrypted copy of the data key -// in the CiphertextBlob field. -// -// You can use the optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. 
To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyPairWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyWithoutPlaintext for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext -func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) - return out, req.Send() -} - -// GenerateDataKeyWithoutPlaintextWithContext is the same as GenerateDataKeyWithoutPlaintext with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyWithoutPlaintext for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyWithoutPlaintextWithContext(ctx aws.Context, input *GenerateDataKeyWithoutPlaintextInput, opts ...request.Option) (*GenerateDataKeyWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateRandom = "GenerateRandom" - -// GenerateRandomRequest generates a "aws/request.Request" representing the -// client's request for the GenerateRandom operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateRandom for more information on using the GenerateRandom -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateRandomRequest method. -// req, resp := client.GenerateRandomRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom -func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Request, output *GenerateRandomOutput) { - op := &request.Operation{ - Name: opGenerateRandom, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateRandomInput{} - } - - output = &GenerateRandomOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateRandom API operation for AWS Key Management Service. -// -// Returns a random byte string that is cryptographically secure. -// -// By default, the random byte string is generated in KMS. To generate the byte -// string in the CloudHSM cluster that is associated with a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), -// specify the custom key store ID. -// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// For more information about entropy and random number generation, see Key -// Management Service Cryptographic Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/). -// -// Required permissions: kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateRandom for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom -func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) { - req, out := c.GenerateRandomRequest(input) - return out, req.Send() -} - -// GenerateRandomWithContext is the same as GenerateRandom with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateRandom for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateRandomWithContext(ctx aws.Context, input *GenerateRandomInput, opts ...request.Option) (*GenerateRandomOutput, error) { - req, out := c.GenerateRandomRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetKeyPolicy = "GetKeyPolicy" - -// GetKeyPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetKeyPolicy for more information on using the GetKeyPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetKeyPolicyRequest method. 
-// req, resp := client.GetKeyPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy -func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Request, output *GetKeyPolicyOutput) { - op := &request.Operation{ - Name: opGetKeyPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetKeyPolicyInput{} - } - - output = &GetKeyPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetKeyPolicy API operation for AWS Key Management Service. -// -// Gets a key policy attached to the specified KMS key. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: PutKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetKeyPolicy for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy -func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) { - req, out := c.GetKeyPolicyRequest(input) - return out, req.Send() -} - -// GetKeyPolicyWithContext is the same as GetKeyPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See GetKeyPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetKeyPolicyWithContext(ctx aws.Context, input *GetKeyPolicyInput, opts ...request.Option) (*GetKeyPolicyOutput, error) { - req, out := c.GetKeyPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetKeyRotationStatus = "GetKeyRotationStatus" - -// GetKeyRotationStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyRotationStatus operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetKeyRotationStatus for more information on using the GetKeyRotationStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetKeyRotationStatusRequest method. -// req, resp := client.GetKeyRotationStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus -func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req *request.Request, output *GetKeyRotationStatusOutput) { - op := &request.Operation{ - Name: opGetKeyRotationStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetKeyRotationStatusInput{} - } - - output = &GetKeyRotationStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetKeyRotationStatus API operation for AWS Key Management Service. -// -// Gets a Boolean value that indicates whether automatic rotation of the key -// material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// is enabled for the specified KMS key. -// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key), -// set the property on the primary key. The key rotation status for these KMS -// keys is always false. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// * Disabled: The key rotation status does not change when you disable a -// KMS key. However, while the KMS key is disabled, KMS does not rotate the -// key material. -// -// * Pending deletion: While a KMS key is pending deletion, its key rotation -// status is false and KMS does not rotate the key material. If you cancel -// the deletion, the original key rotation status is restored. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:GetKeyRotationStatus (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * DisableKeyRotation -// -// * EnableKeyRotation -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetKeyRotationStatus for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus -func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRotationStatusOutput, error) { - req, out := c.GetKeyRotationStatusRequest(input) - return out, req.Send() -} - -// GetKeyRotationStatusWithContext is the same as GetKeyRotationStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetKeyRotationStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetKeyRotationStatusWithContext(ctx aws.Context, input *GetKeyRotationStatusInput, opts ...request.Option) (*GetKeyRotationStatusOutput, error) { - req, out := c.GetKeyRotationStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetParametersForImport = "GetParametersForImport" - -// GetParametersForImportRequest generates a "aws/request.Request" representing the -// client's request for the GetParametersForImport operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetParametersForImport for more information on using the GetParametersForImport -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetParametersForImportRequest method. 
-// req, resp := client.GetParametersForImportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport -func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) (req *request.Request, output *GetParametersForImportOutput) { - op := &request.Operation{ - Name: opGetParametersForImport, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetParametersForImportInput{} - } - - output = &GetParametersForImportOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetParametersForImport API operation for AWS Key Management Service. -// -// Returns the items you need to import key material into a symmetric, customer -// managed KMS key. For more information about importing key material into KMS, -// see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide. -// -// This operation returns a public key and an import token. Use the public key -// to encrypt the symmetric key material. Store the import token to send with -// a subsequent ImportKeyMaterial request. -// -// You must specify the key ID of the symmetric KMS key into which you will -// import key material. This KMS key's Origin must be EXTERNAL. You must also -// specify the wrapping algorithm and type of wrapping key (public key) that -// you will use to encrypt the key material. You cannot perform this operation -// on an asymmetric KMS key or on any KMS key in a different Amazon Web Services -// account. -// -// To import key material, you must use the public key and import token from -// the same response. These items are valid for 24 hours. The expiration date -// and time appear in the GetParametersForImport response. You cannot use an -// expired token in an ImportKeyMaterial request. If your key and token expire, -// send another GetParametersForImport request. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:GetParametersForImport (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * ImportKeyMaterial -// -// * DeleteImportedKeyMaterial -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetParametersForImport for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport -func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) { - req, out := c.GetParametersForImportRequest(input) - return out, req.Send() -} - -// GetParametersForImportWithContext is the same as GetParametersForImport with the addition of -// the ability to pass a context and additional request options. -// -// See GetParametersForImport for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetParametersForImportWithContext(ctx aws.Context, input *GetParametersForImportInput, opts ...request.Option) (*GetParametersForImportOutput, error) { - req, out := c.GetParametersForImportRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPublicKey = "GetPublicKey" - -// GetPublicKeyRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPublicKey for more information on using the GetPublicKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPublicKeyRequest method. -// req, resp := client.GetPublicKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey -func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Request, output *GetPublicKeyOutput) { - op := &request.Operation{ - Name: opGetPublicKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPublicKeyInput{} - } - - output = &GetPublicKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPublicKey API operation for AWS Key Management Service. -// -// Returns the public key of an asymmetric KMS key. Unlike the private key of -// a asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey -// permission can download the public key of an asymmetric KMS key. 
You can -// share the public key to allow others to encrypt messages and verify signatures -// outside of KMS. For information about symmetric and asymmetric KMS keys, -// see Using Symmetric and Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// You do not need to download the public key. Instead, you can use the public -// key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with -// the identifier of an asymmetric KMS key. When you use the public key within -// KMS, you benefit from the authentication, authorization, and logging that -// are part of every KMS operation. You also reduce of risk of encrypting data -// that cannot be decrypted. These features are not effective outside of KMS. -// For details, see Special Considerations for Downloading Public Keys (https://docs.aws.amazon.com/kms/latest/developerguide/download-public-key.html#download-public-key-considerations). -// -// To help you use the public key safely outside of KMS, GetPublicKey returns -// important information about the public key in the response, including: -// -// * KeySpec (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec): -// The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521. -// -// * KeyUsage (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage): -// Whether the key is used for encryption or signing. -// -// * EncryptionAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms) -// or SigningAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms): -// A list of the encryption algorithms or the signing algorithms for the -// key. -// -// Although KMS cannot enforce these restrictions on external operations, it -// is crucial that you use this information to prevent the public key from being -// used improperly. For example, you can prevent a public signing key from being -// used encrypt data, or prevent a public key from being used with an encryption -// algorithm that is not supported by KMS. You can also avoid errors, such as -// using the wrong signing algorithm in a verification operation. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GetPublicKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: CreateKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetPublicKey for usage and error information. 
-// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey -func (c *KMS) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) - return out, req.Send() -} - -// GetPublicKeyWithContext is the same as GetPublicKey with the addition of -// the ability to pass a context and additional request options. -// -// See GetPublicKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetPublicKeyWithContext(ctx aws.Context, input *GetPublicKeyInput, opts ...request.Option) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opImportKeyMaterial = "ImportKeyMaterial" - -// ImportKeyMaterialRequest generates a "aws/request.Request" representing the -// client's request for the ImportKeyMaterial operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ImportKeyMaterial for more information on using the ImportKeyMaterial -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ImportKeyMaterialRequest method. -// req, resp := client.ImportKeyMaterialRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial -func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *request.Request, output *ImportKeyMaterialOutput) { - op := &request.Operation{ - Name: opImportKeyMaterial, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ImportKeyMaterialInput{} - } - - output = &ImportKeyMaterialOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// ImportKeyMaterial API operation for AWS Key Management Service. -// -// Imports key material into an existing symmetric KMS KMS key that was created -// without key material. After you successfully import key material into a KMS -// key, you can reimport the same key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material) -// into that KMS key, but you cannot import different key material. -// -// You cannot perform this operation on an asymmetric KMS key or on any KMS -// key in a different Amazon Web Services account. For more information about -// creating KMS keys with no key material and then importing key material, see -// Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide. -// -// Before using this operation, call GetParametersForImport. Its response includes -// a public key and an import token. Use the public key to encrypt the key material. -// Then, submit the import token from the same GetParametersForImport response. -// -// When calling this operation, you must specify the following values: -// -// * The key ID or key ARN of a KMS key with no key material. Its Origin -// must be EXTERNAL. To create a KMS key with no key material, call CreateKey -// and set the value of its Origin parameter to EXTERNAL. To get the Origin -// of a KMS key, call DescribeKey.) -// -// * The encrypted key material. To get the public key to encrypt the key -// material, call GetParametersForImport. -// -// * The import token that GetParametersForImport returned. You must use -// a public key and token from the same GetParametersForImport response. -// -// * Whether the key material expires and if so, when. If you set an expiration -// date, KMS deletes the key material from the KMS key on the specified date, -// and the KMS key becomes unusable. To use the KMS key again, you must reimport -// the same key material. The only way to change an expiration date is by -// reimporting the same key material and specifying a new expiration date. -// -// When this operation is successful, the key state of the KMS key changes from -// PendingImport to Enabled, and you can use the KMS key. 
-// -// If this operation fails, use the exception to help determine the problem. -// If the error is related to the key material, the import token, or wrapping -// key, use GetParametersForImport to get a new public key and import token -// for the KMS key and repeat the import procedure. For help, see How To Import -// Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ImportKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * DeleteImportedKeyMaterial -// -// * GetParametersForImport -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ImportKeyMaterial for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * InvalidCiphertextException -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -// -// * IncorrectKeyMaterialException -// The request was rejected because the key material in the request is, expired, -// invalid, or is not the same key material that was previously imported into -// this KMS key. -// -// * ExpiredImportTokenException -// The request was rejected because the specified import token is expired. Use -// GetParametersForImport to get a new import token and public key, use the -// new public key to encrypt the key material, and then try the request again. 
-// -// * InvalidImportTokenException -// The request was rejected because the provided import token is invalid or -// is associated with a different KMS key. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial -func (c *KMS) ImportKeyMaterial(input *ImportKeyMaterialInput) (*ImportKeyMaterialOutput, error) { - req, out := c.ImportKeyMaterialRequest(input) - return out, req.Send() -} - -// ImportKeyMaterialWithContext is the same as ImportKeyMaterial with the addition of -// the ability to pass a context and additional request options. -// -// See ImportKeyMaterial for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ImportKeyMaterialWithContext(ctx aws.Context, input *ImportKeyMaterialInput, opts ...request.Option) (*ImportKeyMaterialOutput, error) { - req, out := c.ImportKeyMaterialRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListAliases = "ListAliases" - -// ListAliasesRequest generates a "aws/request.Request" representing the -// client's request for the ListAliases operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAliases for more information on using the ListAliases -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAliasesRequest method. -// req, resp := client.ListAliasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases -func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { - op := &request.Operation{ - Name: opListAliases, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListAliasesInput{} - } - - output = &ListAliasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListAliases API operation for AWS Key Management Service. -// -// Gets a list of aliases in the caller's Amazon Web Services account and region. -// For more information about aliases, see CreateAlias. -// -// By default, the ListAliases operation returns all aliases in the account -// and region. To get only the aliases associated with a particular KMS key, -// use the KeyId parameter. -// -// The ListAliases response can include aliases that you created and associated -// with your customer managed keys, and aliases that Amazon Web Services created -// and associated with Amazon Web Services managed keys in your account. You -// can recognize Amazon Web Services aliases because their names have the format -// aws/, such as aws/dynamodb. 
-// -// The response might also include aliases that have no TargetKeyId field. These -// are predefined aliases that Amazon Web Services has created but has not yet -// associated with a KMS key. Aliases that Amazon Web Services creates in your -// account, including predefined aliases, do not count against your KMS aliases -// quota (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit). -// -// Cross-account use: No. ListAliases does not return aliases in other Amazon -// Web Services accounts. -// -// Required permissions: kms:ListAliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * DeleteAlias -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListAliases for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases -func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - return out, req.Send() -} - -// ListAliasesWithContext is the same as ListAliases with the addition of -// the ability to pass a context and additional request options. -// -// See ListAliases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAliasesPages iterates over the pages of a ListAliases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListAliases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAliases operation. 
-// pageNum := 0 -// err := client.ListAliasesPages(params, -// func(page *kms.ListAliasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool) error { - return c.ListAliasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAliasesPagesWithContext same as ListAliasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAliasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAliasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListGrants = "ListGrants" - -// ListGrantsRequest generates a "aws/request.Request" representing the -// client's request for the ListGrants operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListGrants for more information on using the ListGrants -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListGrantsRequest method. -// req, resp := client.ListGrantsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants -func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, output *ListGrantsResponse) { - op := &request.Operation{ - Name: opListGrants, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListGrantsInput{} - } - - output = &ListGrantsResponse{} - req = c.newRequest(op, input, output) - return -} - -// ListGrants API operation for AWS Key Management Service. -// -// Gets a list of all grants for the specified KMS key. -// -// You must specify the KMS key in all requests. You can filter the grant list -// by grant ID or grantee principal. -// -// For detailed information about grants, including grant terminology, see Using -// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). 
-// -// The GranteePrincipal field in the ListGrants response usually contains the -// user or role designated as the grantee principal in the grant. However, when -// the grantee principal in the grant is an Amazon Web Services service, the -// GranteePrincipal field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), -// which might represent several different grantee principals. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:ListGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * CreateGrant -// -// * ListRetirableGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListGrants for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InvalidGrantIdException -// The request was rejected because the specified GrantId is not valid. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants -func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { - req, out := c.ListGrantsRequest(input) - return out, req.Send() -} - -// ListGrantsWithContext is the same as ListGrants with the addition of -// the ability to pass a context and additional request options. -// -// See ListGrants for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListGrantsWithContext(ctx aws.Context, input *ListGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { - req, out := c.ListGrantsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListGrantsPages iterates over the pages of a ListGrants operation, -// calling the "fn" function with the response data for each page. 
To stop -// iterating, return false from the fn function. -// -// See ListGrants method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListGrants operation. -// pageNum := 0 -// err := client.ListGrantsPages(params, -// func(page *kms.ListGrantsResponse, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool) error { - return c.ListGrantsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListGrantsPagesWithContext same as ListGrantsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListGrantsPagesWithContext(ctx aws.Context, input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListGrantsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListGrantsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListKeyPolicies = "ListKeyPolicies" - -// ListKeyPoliciesRequest generates a "aws/request.Request" representing the -// client's request for the ListKeyPolicies operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListKeyPolicies for more information on using the ListKeyPolicies -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListKeyPoliciesRequest method. -// req, resp := client.ListKeyPoliciesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies -func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.Request, output *ListKeyPoliciesOutput) { - op := &request.Operation{ - Name: opListKeyPolicies, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListKeyPoliciesInput{} - } - - output = &ListKeyPoliciesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListKeyPolicies API operation for AWS Key Management Service. -// -// Gets the names of the key policies that are attached to a KMS key. This operation -// is designed to get policy names that you can use in a GetKeyPolicy operation. -// However, the only valid policy name is default. -// -// Cross-account use: No. 
You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListKeyPolicies (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetKeyPolicy -// -// * PutKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListKeyPolicies for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies -func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) { - req, out := c.ListKeyPoliciesRequest(input) - return out, req.Send() -} - -// ListKeyPoliciesWithContext is the same as ListKeyPolicies with the addition of -// the ability to pass a context and additional request options. -// -// See ListKeyPolicies for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeyPoliciesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, opts ...request.Option) (*ListKeyPoliciesOutput, error) { - req, out := c.ListKeyPoliciesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListKeyPoliciesPages iterates over the pages of a ListKeyPolicies operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListKeyPolicies method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListKeyPolicies operation. 
-// pageNum := 0 -// err := client.ListKeyPoliciesPages(params, -// func(page *kms.ListKeyPoliciesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool) error { - return c.ListKeyPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListKeyPoliciesPagesWithContext same as ListKeyPoliciesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListKeyPoliciesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListKeyPoliciesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListKeys = "ListKeys" - -// ListKeysRequest generates a "aws/request.Request" representing the -// client's request for the ListKeys operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListKeys for more information on using the ListKeys -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListKeysRequest method. -// req, resp := client.ListKeysRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys -func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) { - op := &request.Operation{ - Name: opListKeys, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListKeysInput{} - } - - output = &ListKeysOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListKeys API operation for AWS Key Management Service. -// -// Gets a list of all KMS keys in the caller's Amazon Web Services account and -// Region. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListKeys (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * CreateKey -// -// * DescribeKey -// -// * ListAliases -// -// * ListResourceTags -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListKeys for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys -func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { - req, out := c.ListKeysRequest(input) - return out, req.Send() -} - -// ListKeysWithContext is the same as ListKeys with the addition of -// the ability to pass a context and additional request options. -// -// See ListKeys for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts ...request.Option) (*ListKeysOutput, error) { - req, out := c.ListKeysRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListKeysPages iterates over the pages of a ListKeys operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListKeys method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListKeys operation. -// pageNum := 0 -// err := client.ListKeysPages(params, -// func(page *kms.ListKeysOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(*ListKeysOutput, bool) bool) error { - return c.ListKeysPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListKeysPagesWithContext same as ListKeysPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn func(*ListKeysOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListKeysInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListKeysRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListResourceTags = "ListResourceTags" - -// ListResourceTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListResourceTags operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListResourceTags for more information on using the ListResourceTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListResourceTagsRequest method. -// req, resp := client.ListResourceTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags -func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *request.Request, output *ListResourceTagsOutput) { - op := &request.Operation{ - Name: opListResourceTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListResourceTagsInput{} - } - - output = &ListResourceTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListResourceTags API operation for AWS Key Management Service. -// -// Returns all tags on the specified KMS key. -// -// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. For information about using -// tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListResourceTags (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * CreateKey -// -// * ReplicateKey -// -// * TagResource -// -// * UntagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListResourceTags for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags -func (c *KMS) ListResourceTags(input *ListResourceTagsInput) (*ListResourceTagsOutput, error) { - req, out := c.ListResourceTagsRequest(input) - return out, req.Send() -} - -// ListResourceTagsWithContext is the same as ListResourceTags with the addition of -// the ability to pass a context and additional request options. -// -// See ListResourceTags for details on how to use this API operation. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListResourceTagsWithContext(ctx aws.Context, input *ListResourceTagsInput, opts ...request.Option) (*ListResourceTagsOutput, error) { - req, out := c.ListResourceTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListRetirableGrants = "ListRetirableGrants" - -// ListRetirableGrantsRequest generates a "aws/request.Request" representing the -// client's request for the ListRetirableGrants operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListRetirableGrants for more information on using the ListRetirableGrants -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListRetirableGrantsRequest method. -// req, resp := client.ListRetirableGrantsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants -func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *request.Request, output *ListGrantsResponse) { - op := &request.Operation{ - Name: opListRetirableGrants, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListRetirableGrantsInput{} - } - - output = &ListGrantsResponse{} - req = c.newRequest(op, input, output) - return -} - -// ListRetirableGrants API operation for AWS Key Management Service. -// -// Returns information about all grants in the Amazon Web Services account and -// Region that have the specified retiring principal. -// -// You can specify any principal in your Amazon Web Services account. The grants -// that are returned include grants for KMS keys in your Amazon Web Services -// account and other Amazon Web Services accounts. You might use this operation -// to determine which grants you may retire. To retire a grant, use the RetireGrant -// operation. -// -// For detailed information about grants, including grant terminology, see Using -// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// Cross-account use: You must specify a principal in your Amazon Web Services -// account. However, this operation can return grants in any Amazon Web Services -// account. You do not need kms:ListRetirableGrants permission (or any other -// additional permission) in any Amazon Web Services account other than your -// own. -// -// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) in your Amazon Web Services account. 
-// -// Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListRetirableGrants for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants -func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsResponse, error) { - req, out := c.ListRetirableGrantsRequest(input) - return out, req.Send() -} - -// ListRetirableGrantsWithContext is the same as ListRetirableGrants with the addition of -// the ability to pass a context and additional request options. -// -// See ListRetirableGrants for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListRetirableGrantsWithContext(ctx aws.Context, input *ListRetirableGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { - req, out := c.ListRetirableGrantsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutKeyPolicy = "PutKeyPolicy" - -// PutKeyPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutKeyPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutKeyPolicy for more information on using the PutKeyPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutKeyPolicyRequest method. 
-// req, resp := client.PutKeyPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy -func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Request, output *PutKeyPolicyOutput) { - op := &request.Operation{ - Name: opPutKeyPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutKeyPolicyInput{} - } - - output = &PutKeyPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutKeyPolicy API operation for AWS Key Management Service. -// -// Attaches a key policy to the specified KMS key. -// -// For more information about key policies, see Key Policies (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) -// in the Key Management Service Developer Guide. For help writing and formatting -// a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) -// in the Identity and Access Management User Guide . For examples of adding -// a key policy in multiple programming languages, see Setting a key policy -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: GetKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation PutKeyPolicy for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy -func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { - req, out := c.PutKeyPolicyRequest(input) - return out, req.Send() -} - -// PutKeyPolicyWithContext is the same as PutKeyPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutKeyPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) PutKeyPolicyWithContext(ctx aws.Context, input *PutKeyPolicyInput, opts ...request.Option) (*PutKeyPolicyOutput, error) { - req, out := c.PutKeyPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opReEncrypt = "ReEncrypt" - -// ReEncryptRequest generates a "aws/request.Request" representing the -// client's request for the ReEncrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ReEncrypt for more information on using the ReEncrypt -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ReEncryptRequest method. -// req, resp := client.ReEncryptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt -func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, output *ReEncryptOutput) { - op := &request.Operation{ - Name: opReEncrypt, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ReEncryptInput{} - } - - output = &ReEncryptOutput{} - req = c.newRequest(op, input, output) - return -} - -// ReEncrypt API operation for AWS Key Management Service. -// -// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use -// this operation to change the KMS key under which data is encrypted, such -// as when you manually rotate (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) -// a KMS key or change the KMS key that protects a ciphertext. You can also -// use it to reencrypt ciphertext under the same KMS key, such as to change -// the encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// of a ciphertext. -// -// The ReEncrypt operation can decrypt ciphertext that was encrypted by using -// an KMS KMS key in an KMS operation, such as Encrypt or GenerateDataKey. 
It -// can also decrypt ciphertext that was encrypted by using the public key of -// an asymmetric KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) -// outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, -// such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) -// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). -// These libraries return a ciphertext format that is incompatible with KMS. -// -// When you use the ReEncrypt operation, you need to provide information for -// the decrypt operation and the subsequent encrypt operation. -// -// * If your ciphertext was encrypted under an asymmetric KMS key, you must -// use the SourceKeyId parameter to identify the KMS key that encrypted the -// ciphertext. You must also supply the encryption algorithm that was used. -// This information is required to decrypt the data. -// -// * If your ciphertext was encrypted under a symmetric KMS key, the SourceKeyId -// parameter is optional. KMS can get this information from metadata that -// it adds to the symmetric ciphertext blob. This feature adds durability -// to your implementation by ensuring that authorized users can decrypt ciphertext -// decades after it was encrypted, even if they've lost track of the key -// ID. However, specifying the source KMS key is always recommended as a -// best practice. When you use the SourceKeyId parameter to specify a KMS -// key, KMS uses only the KMS key you specify. If the ciphertext was encrypted -// under a different KMS key, the ReEncrypt operation fails. This practice -// ensures that you use the KMS key that you intend. -// -// * To reencrypt the data, you must use the DestinationKeyId parameter specify -// the KMS key that re-encrypts the data after it is decrypted. You can select -// a symmetric or asymmetric KMS key. If the destination KMS key is an asymmetric -// KMS key, you must also provide the encryption algorithm. The algorithm -// that you choose must be compatible with the KMS key. When you use an asymmetric -// KMS key to encrypt or reencrypt data, be sure to record the KMS key and -// encryption algorithm that you choose. You will be required to provide -// the same KMS key and encryption algorithm when you decrypt the data. If -// the KMS key and algorithm do not match the values used to encrypt the -// data, the decrypt operation fails. You are not required to supply the -// key ID and encryption algorithm when you decrypt with symmetric KMS keys -// because KMS stores this information in the ciphertext blob. KMS cannot -// store metadata in ciphertext generated with asymmetric keys. The standard -// format for asymmetric key ciphertext does not include configurable fields. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. The source KMS key and destination KMS key can be -// in different Amazon Web Services accounts. Either or both KMS keys can be -// in a different account than the caller. To specify a KMS key in a different -// account, you must use its key ARN or alias ARN. 
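A minimal sketch of the ReEncrypt flow described above, assuming a caller-supplied client and placeholder key ARNs: the ciphertext is handed back to KMS, decrypted, and re-encrypted under the destination key without the plaintext leaving KMS. For a symmetric source key SourceKeyId could be omitted, but naming it follows the best practice noted in the doc comment.

```
package kmsexamples // hypothetical package, for illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// reencrypt moves a ciphertext from sourceKeyARN to destKeyARN entirely
// within KMS and returns the new ciphertext blob.
func reencrypt(client *kms.KMS, ciphertext []byte, sourceKeyARN, destKeyARN string) ([]byte, error) {
	out, err := client.ReEncrypt(&kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		SourceKeyId:      aws.String(sourceKeyARN),
		DestinationKeyId: aws.String(destKeyARN),
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}
```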
-// -// Required permissions: -// -// * kms:ReEncryptFrom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the source KMS key (key policy) -// -// * kms:ReEncryptTo (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the destination KMS key (key policy) -// -// To permit reencryption from or to a KMS key, include the "kms:ReEncrypt*" -// permission in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). -// This permission is automatically included in the key policy when you use -// the console to create a KMS key. But you must include it manually when you -// create a KMS key programmatically or when you use the PutKeyPolicy operation -// to set a key policy. -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ReEncrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidCiphertextException -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * IncorrectKeyException -// The request was rejected because the specified KMS key cannot decrypt the -// data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request -// must identify the same KMS key that was used to encrypt the ciphertext. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. 
-// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt -func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { - req, out := c.ReEncryptRequest(input) - return out, req.Send() -} - -// ReEncryptWithContext is the same as ReEncrypt with the addition of -// the ability to pass a context and additional request options. -// -// See ReEncrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ReEncryptWithContext(ctx aws.Context, input *ReEncryptInput, opts ...request.Option) (*ReEncryptOutput, error) { - req, out := c.ReEncryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opReplicateKey = "ReplicateKey" - -// ReplicateKeyRequest generates a "aws/request.Request" representing the -// client's request for the ReplicateKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ReplicateKey for more information on using the ReplicateKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ReplicateKeyRequest method. -// req, resp := client.ReplicateKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKey -func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Request, output *ReplicateKeyOutput) { - op := &request.Operation{ - Name: opReplicateKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ReplicateKeyInput{} - } - - output = &ReplicateKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// ReplicateKey API operation for AWS Key Management Service. -// -// Replicates a multi-Region key into the specified Region. This operation creates -// a multi-Region replica key based on a multi-Region primary key in a different -// Region of the same Amazon Web Services partition. You can create multiple -// replicas of a primary key, but each must be in a different Region. To create -// a multi-Region primary key, use the CreateKey operation. -// -// This operation supports multi-Region keys, an KMS feature that lets you create -// multiple interoperable KMS keys in different Amazon Web Services Regions. 
-// Because these KMS keys have the same key ID, key material, and other metadata, -// you can use them interchangeably to encrypt data in one Amazon Web Services -// Region and decrypt it in a different Amazon Web Services Region without re-encrypting -// the data or making a cross-Region call. For more information about multi-Region -// keys, see Using multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. -// -// A replica key is a fully-functional KMS key that can be used independently -// of its primary and peer replica keys. A primary key and its replica keys -// share properties that make them interoperable. They have the same key ID -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) -// and key material. They also have the same key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), -// key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), -// key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), -// and automatic key rotation status (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). -// KMS automatically synchronizes these shared properties among related multi-Region -// keys. All other properties of a replica key can differ, including its key -// policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html), -// tags (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html), -// aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), -// and key state (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). -// KMS pricing and quotas for KMS keys apply to each primary key and replica -// key. -// -// When this operation completes, the new replica key has a transient key state -// of Creating. This key state changes to Enabled (or PendingImport) after a -// few seconds when the process of creating the new replica key is complete. -// While the key state is Creating, you can manage key, but you cannot yet use -// it in cryptographic operations. If you are creating and using the replica -// key programmatically, retry on KMSInvalidStateException or call DescribeKey -// to check its KeyState value before using it. For details about the Creating -// key state, see Key state: Effect on your KMS key (kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation -// in the primary key's Region and a CreateKey operation in the replica key's -// Region. -// -// If you replicate a multi-Region primary key with imported key material, the -// replica key is created with no key material. You must import the same key -// material that you imported into the primary key. For details, see Importing -// key material into multi-Region keys (kms/latest/developerguide/multi-region-keys-import.html) -// in the Key Management Service Developer Guide. -// -// To convert a replica key to a primary key, use the UpdatePrimaryRegion operation. -// -// ReplicateKey uses different default values for the KeyPolicy and Tags parameters -// than those used in the KMS console. For details, see the parameter descriptions. -// -// Cross-account use: No. You cannot use this operation to create a replica -// key in a different Amazon Web Services account. 
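A minimal sketch of replicating a multi-Region primary key per the description above; the primary key ID and target Region are placeholder assumptions, and per the doc comment the caller still needs to retry on KMSInvalidStateException or poll DescribeKey until the replica leaves the transient Creating state.

```
package kmsexamples // hypothetical package, for illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// replicate creates a replica of a multi-Region primary key in another Region
// and returns the replica key's ARN.
func replicate(client *kms.KMS, primaryKeyID, replicaRegion string) (string, error) {
	out, err := client.ReplicateKey(&kms.ReplicateKeyInput{
		KeyId:         aws.String(primaryKeyID),
		ReplicaRegion: aws.String(replicaRegion),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.ReplicaKeyMetadata.Arn), nil
}
```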
-// -// Required permissions: -// -// * kms:ReplicateKey on the primary key (in the primary key's Region). Include -// this permission in the primary key's key policy. -// -// * kms:CreateKey in an IAM policy in the replica Region. -// -// * To use the Tags parameter, kms:TagResource in an IAM policy in the replica -// Region. -// -// Related operations -// -// * CreateKey -// -// * UpdatePrimaryRegion -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ReplicateKey for usage and error information. -// -// Returned Error Types: -// * AlreadyExistsException -// The request was rejected because it attempted to create a resource that already -// exists. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKey -func (c *KMS) ReplicateKey(input *ReplicateKeyInput) (*ReplicateKeyOutput, error) { - req, out := c.ReplicateKeyRequest(input) - return out, req.Send() -} - -// ReplicateKeyWithContext is the same as ReplicateKey with the addition of -// the ability to pass a context and additional request options. -// -// See ReplicateKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ReplicateKeyWithContext(ctx aws.Context, input *ReplicateKeyInput, opts ...request.Option) (*ReplicateKeyOutput, error) { - req, out := c.ReplicateKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opRetireGrant = "RetireGrant" - -// RetireGrantRequest generates a "aws/request.Request" representing the -// client's request for the RetireGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RetireGrant for more information on using the RetireGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RetireGrantRequest method. -// req, resp := client.RetireGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant -func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, output *RetireGrantOutput) { - op := &request.Operation{ - Name: opRetireGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RetireGrantInput{} - } - - output = &RetireGrantOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RetireGrant API operation for AWS Key Management Service. -// -// Deletes a grant. Typically, you retire a grant when you no longer need its -// permissions. To identify the grant to retire, use a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token), -// or both the grant ID and a key identifier (key ID or key ARN) of the KMS -// key. The CreateGrant operation returns both values. -// -// This operation can be called by the retiring principal for a grant, by the -// grantee principal if the grant allows the RetireGrant operation, and by the -// Amazon Web Services account (root user) in which the grant is created. It -// can also be called by principals to whom permission for retiring a grant -// is delegated. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) -// in the Key Management Service Developer Guide. -// -// For detailed information about grants, including grant terminology, see Using -// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// Cross-account use: Yes. You can retire a grant on a KMS key in a different -// Amazon Web Services account. -// -// Required permissions::Permission to retire a grant is determined primarily -// by the grant. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation RetireGrant for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InvalidGrantIdException -// The request was rejected because the specified GrantId is not valid. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant -func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) { - req, out := c.RetireGrantRequest(input) - return out, req.Send() -} - -// RetireGrantWithContext is the same as RetireGrant with the addition of -// the ability to pass a context and additional request options. -// -// See RetireGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) RetireGrantWithContext(ctx aws.Context, input *RetireGrantInput, opts ...request.Option) (*RetireGrantOutput, error) { - req, out := c.RetireGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRevokeGrant = "RevokeGrant" - -// RevokeGrantRequest generates a "aws/request.Request" representing the -// client's request for the RevokeGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RevokeGrant for more information on using the RevokeGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RevokeGrantRequest method. 
-// req, resp := client.RevokeGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant -func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, output *RevokeGrantOutput) { - op := &request.Operation{ - Name: opRevokeGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RevokeGrantInput{} - } - - output = &RevokeGrantOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RevokeGrant API operation for AWS Key Management Service. -// -// Deletes the specified grant. You revoke a grant to terminate the permissions -// that the grant allows. For more information, see Retiring and revoking grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) -// in the Key Management Service Developer Guide . -// -// When you create, retire, or revoke a grant, there might be a brief delay, -// usually less than five minutes, until the grant is available throughout KMS. -// This state is known as eventual consistency. For details, see Eventual consistency -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) -// in the Key Management Service Developer Guide . -// -// For detailed information about grants, including grant terminology, see Using -// grants (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:RevokeGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy). -// -// Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RetireGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation RevokeGrant for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidGrantIdException -// The request was rejected because the specified GrantId is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant -func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) { - req, out := c.RevokeGrantRequest(input) - return out, req.Send() -} - -// RevokeGrantWithContext is the same as RevokeGrant with the addition of -// the ability to pass a context and additional request options. -// -// See RevokeGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) RevokeGrantWithContext(ctx aws.Context, input *RevokeGrantInput, opts ...request.Option) (*RevokeGrantOutput, error) { - req, out := c.RevokeGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opScheduleKeyDeletion = "ScheduleKeyDeletion" - -// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the -// client's request for the ScheduleKeyDeletion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ScheduleKeyDeletion for more information on using the ScheduleKeyDeletion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ScheduleKeyDeletionRequest method. -// req, resp := client.ScheduleKeyDeletionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion -func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) { - op := &request.Operation{ - Name: opScheduleKeyDeletion, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ScheduleKeyDeletionInput{} - } - - output = &ScheduleKeyDeletionOutput{} - req = c.newRequest(op, input, output) - return -} - -// ScheduleKeyDeletion API operation for AWS Key Management Service. -// -// Schedules the deletion of a KMS key. By default, KMS applies a waiting period -// of 30 days, but you can specify a waiting period of 7-30 days. When this -// operation is successful, the key state of the KMS key changes to PendingDeletion -// and the key can't be used in any cryptographic operations. It remains in -// this state for the duration of the waiting period. Before the waiting period -// ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. -// After the waiting period ends, KMS deletes the KMS key, its key material, -// and all KMS data associated with it, including all aliases that refer to -// it. -// -// Deleting a KMS key is a destructive and potentially dangerous operation. 
-// When a KMS key is deleted, all data that was encrypted under the KMS key -// is unrecoverable. (The only exception is a multi-Region replica key.) To -// prevent the use of a KMS key without deleting it, use DisableKey. -// -// If you schedule deletion of a KMS key from a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), -// when the waiting period expires, ScheduleKeyDeletion deletes the KMS key -// from KMS. Then KMS makes a best effort to delete the key material from the -// associated CloudHSM cluster. However, you might need to manually delete the -// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) -// from the cluster and its backups. -// -// You can schedule the deletion of a multi-Region primary key and its replica -// keys at any time. However, KMS will not delete a multi-Region primary key -// with existing replica keys. If you schedule the deletion of a primary key -// with replicas, its key state changes to PendingReplicaDeletion and it cannot -// be replicated or used in cryptographic operations. This status can continue -// indefinitely. When the last of its replicas keys is deleted (not just scheduled), -// the key state of the primary key changes to PendingDeletion and its waiting -// period (PendingWindowInDays) begins. For details, see Deleting multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) -// in the Key Management Service Developer Guide. -// -// For more information about scheduling a KMS key for deletion, see Deleting -// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ScheduleKeyDeletion (key policy) -// -// Related operations -// -// * CancelKeyDeletion -// -// * DisableKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ScheduleKeyDeletion for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion -func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) { - req, out := c.ScheduleKeyDeletionRequest(input) - return out, req.Send() -} - -// ScheduleKeyDeletionWithContext is the same as ScheduleKeyDeletion with the addition of -// the ability to pass a context and additional request options. -// -// See ScheduleKeyDeletion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ScheduleKeyDeletionWithContext(ctx aws.Context, input *ScheduleKeyDeletionInput, opts ...request.Option) (*ScheduleKeyDeletionOutput, error) { - req, out := c.ScheduleKeyDeletionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opSign = "Sign" - -// SignRequest generates a "aws/request.Request" representing the -// client's request for the Sign operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Sign for more information on using the Sign -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the SignRequest method. -// req, resp := client.SignRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign -func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignOutput) { - op := &request.Operation{ - Name: opSign, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &SignInput{} - } - - output = &SignOutput{} - req = c.newRequest(op, input, output) - return -} - -// Sign API operation for AWS Key Management Service. -// -// Creates a digital signature (https://en.wikipedia.org/wiki/Digital_signature) -// for a message or message digest by using the private key in an asymmetric -// KMS key. To verify the signature, use the Verify operation, or use the public -// key in the same asymmetric KMS key outside of KMS. For information about -// symmetric and asymmetric KMS keys, see Using Symmetric and Asymmetric KMS -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// Digital signatures are generated and verified by using asymmetric key pair, -// such as an RSA or ECC pair that is represented by an asymmetric KMS key. -// The key owner (or an authorized user) uses their private key to sign a message. 
-// Anyone with the public key can verify that the message was signed with that -// particular private key and that the message hasn't changed since it was signed. -// -// To use the Sign operation, provide the following information: -// -// * Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage -// value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the -// DescribeKey operation. The caller must have kms:Sign permission on the -// KMS key. -// -// * Use the Message parameter to specify the message or message digest to -// sign. You can submit messages of up to 4096 bytes. To sign a larger message, -// generate a hash digest of the message, and then provide the hash digest -// in the Message parameter. To indicate whether the message is a full message -// or a digest, use the MessageType parameter. -// -// * Choose a signing algorithm that is compatible with the KMS key. -// -// When signing a message, be sure to record the KMS key and the signing algorithm. -// This information is required to verify the signature. -// -// To verify the signature that this operation generates, use the Verify operation. -// Or use the GetPublicKey operation to download the public key and then use -// the public key to verify the signature outside of KMS. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Sign (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: Verify -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Sign for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. 
-// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign -func (c *KMS) Sign(input *SignInput) (*SignOutput, error) { - req, out := c.SignRequest(input) - return out, req.Send() -} - -// SignWithContext is the same as Sign with the addition of -// the ability to pass a context and additional request options. -// -// See Sign for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) SignWithContext(ctx aws.Context, input *SignInput, opts ...request.Option) (*SignOutput, error) { - req, out := c.SignRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource -func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// TagResource API operation for AWS Key Management Service. -// -// Adds or edits tags on a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// -// Tagging or untagging a KMS key can allow or deny permission to the KMS key. -// For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// Each tag consists of a tag key and a tag value, both of which are case-sensitive -// strings. 
The tag value can be an empty (null) string. To add a tag, specify -// a new tag key and a tag value. To edit a tag, specify an existing tag key -// and a new tag value. -// -// You can use this operation to tag a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), -// but you cannot tag an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk), -// an Amazon Web Services owned key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk), -// a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept), -// or an alias (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept). -// -// You can also add tags to a KMS key while creating it (CreateKey) or replicating -// it (ReplicateKey). -// -// For information about using tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * ReplicateKey -// -// * UntagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation TagResource for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * TagException -// The request was rejected because one or more tags are not valid. 
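The removed TagResource documentation above spells out the tag rules and required permissions but only shows the low-level Request/Send pattern. Below is a hedged, self-contained sketch of the more common shortcut call on the v1 client; the region, key ID, and tag key/value are illustrative assumptions, not values taken from this patch:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Shared session; the region is an illustrative placeholder.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := kms.New(sess)

	// Add or edit a tag on a customer managed key (key ID is a placeholder).
	_, err := client.TagResource(&kms.TagResourceInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("autograph")},
		},
	})
	if err != nil {
		log.Fatalf("TagResource failed: %v", err)
	}
}
```

On success the operation returns an empty TagResourceOutput, which is why the generated request above swaps in the discard-body unmarshal handler.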
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource -func (c *KMS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource -func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UntagResource API operation for AWS Key Management Service. -// -// Deletes tags from a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// To delete a tag, specify the tag key and the KMS key. -// -// Tagging or untagging a KMS key can allow or deny permission to the KMS key. -// For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// When it succeeds, the UntagResource operation doesn't return any output. -// Also, if the specified tag key isn't found on the KMS key, it doesn't throw -// an exception or return a response. To confirm that the operation worked, -// use the ListResourceTags operation. -// -// For information about using tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). 
-// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:UntagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * ReplicateKey -// -// * TagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UntagResource for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource -func (c *KMS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateAlias = "UpdateAlias" - -// UpdateAliasRequest generates a "aws/request.Request" representing the -// client's request for the UpdateAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See UpdateAlias for more information on using the UpdateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateAliasRequest method. -// req, resp := client.UpdateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias -func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { - op := &request.Operation{ - Name: opUpdateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateAliasInput{} - } - - output = &UpdateAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateAlias API operation for AWS Key Management Service. -// -// Associates an existing KMS alias with a different KMS key. Each alias is -// associated with only one KMS key at a time, although a KMS key can have multiple -// aliases. The alias and the KMS key must be in the same Amazon Web Services -// account and Region. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// The current and new KMS key must be the same type (both symmetric or both -// asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). -// This restriction prevents errors in code that uses aliases. If you must assign -// an alias to a different type of KMS key, use DeleteAlias to delete the old -// alias and CreateAlias to create a new alias. -// -// You cannot use UpdateAlias to change an alias name. To change an alias name, -// use DeleteAlias to delete the old alias and CreateAlias to create a new alias. -// -// Because an alias is not a property of a KMS key, you can create, update, -// and delete the aliases of a KMS key without affecting the KMS key. Also, -// aliases do not appear in the response from the DescribeKey operation. To -// get the aliases of all KMS keys in the account, use the ListAliases operation. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the current KMS key (key policy). -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the new KMS key (key policy). 
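Given the constraints just listed (the current and new key must share a key type and key usage, and kms:UpdateAlias is needed on the alias and on both keys), here is a minimal sketch of repointing an alias with the removed v1 client; the alias name and key ID are placeholder assumptions:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := kms.New(sess)

	// Point an existing alias at a different KMS key of the same type and
	// key usage. Both identifiers below are illustrative placeholders.
	_, err := client.UpdateAlias(&kms.UpdateAliasInput{
		AliasName:   aws.String("alias/example-signing-key"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatalf("UpdateAlias failed: %v", err)
	}
}
```

As the removed comment notes, UpdateAlias cannot rename an alias; renaming requires DeleteAlias followed by CreateAlias.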
-// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * DeleteAlias -// -// * ListAliases -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdateAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias -func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - return out, req.Send() -} - -// UpdateAliasWithContext is the same as UpdateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdateAliasWithContext(ctx aws.Context, input *UpdateAliasInput, opts ...request.Option) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCustomKeyStore = "UpdateCustomKeyStore" - -// UpdateCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCustomKeyStore for more information on using the UpdateCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateCustomKeyStoreRequest method. 
-// req, resp := client.UpdateCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore -func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req *request.Request, output *UpdateCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opUpdateCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateCustomKeyStoreInput{} - } - - output = &UpdateCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateCustomKeyStore API operation for AWS Key Management Service. -// -// Changes the properties of a custom key store. Use the CustomKeyStoreId parameter -// to identify the custom key store you want to edit. Use the remaining parameters -// to change the properties of the custom key store. -// -// You can only update a custom key store that is disconnected. To disconnect -// the custom key store, use DisconnectCustomKeyStore. To reconnect the custom -// key store after the update completes, use ConnectCustomKeyStore. To find -// the connection state of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// The CustomKeyStoreId parameter is required in all commands. Use the other -// parameters of UpdateCustomKeyStore to edit your key store settings. -// -// * Use the NewCustomKeyStoreName parameter to change the friendly name -// of the custom key store to the value that you specify. -// -// * Use the KeyStorePassword parameter tell KMS the current password of -// the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// in the associated CloudHSM cluster. You can use this parameter to fix -// connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password) -// that occur when KMS cannot log into the associated cluster because the -// kmsuser password has changed. This value does not change the password -// in the CloudHSM cluster. -// -// * Use the CloudHsmClusterId parameter to associate the custom key store -// with a different, but related, CloudHSM cluster. You can use this parameter -// to repair a custom key store if its CloudHSM cluster becomes corrupted -// or is deleted, or when you need to create or restore a cluster from a -// backup. -// -// If the operation succeeds, it returns a JSON object with no properties. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:UpdateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdateCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * CustomKeyStoreNameInUseException -// The request was rejected because the specified custom key store name is already -// assigned to another custom key store in the account. Try again with a custom -// key store name that is unique in the account. -// -// * CloudHsmClusterNotFoundException -// The request was rejected because KMS cannot find the CloudHSM cluster with -// the specified cluster ID. Retry the request with a different cluster ID. -// -// * CloudHsmClusterNotRelatedException -// The request was rejected because the specified CloudHSM cluster has a different -// cluster certificate than the original cluster. You cannot use the operation -// to specify an unrelated cluster. -// -// Specify a cluster that shares a backup history with the original cluster. -// This includes clusters that were created from a backup of the current cluster, -// and clusters that were created from the same backup that produced the current -// cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CloudHsmClusterNotActiveException -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. 
-// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore -func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) { - req, out := c.UpdateCustomKeyStoreRequest(input) - return out, req.Send() -} - -// UpdateCustomKeyStoreWithContext is the same as UpdateCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdateCustomKeyStoreWithContext(ctx aws.Context, input *UpdateCustomKeyStoreInput, opts ...request.Option) (*UpdateCustomKeyStoreOutput, error) { - req, out := c.UpdateCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateKeyDescription = "UpdateKeyDescription" - -// UpdateKeyDescriptionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateKeyDescription operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateKeyDescription for more information on using the UpdateKeyDescription -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateKeyDescriptionRequest method. -// req, resp := client.UpdateKeyDescriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription -func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req *request.Request, output *UpdateKeyDescriptionOutput) { - op := &request.Operation{ - Name: opUpdateKeyDescription, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateKeyDescriptionInput{} - } - - output = &UpdateKeyDescriptionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateKeyDescription API operation for AWS Key Management Service. -// -// Updates the description of a KMS key. To see the description of a KMS key, -// use DescribeKey. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:UpdateKeyDescription (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * DescribeKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdateKeyDescription for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription -func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) { - req, out := c.UpdateKeyDescriptionRequest(input) - return out, req.Send() -} - -// UpdateKeyDescriptionWithContext is the same as UpdateKeyDescription with the addition of -// the ability to pass a context and additional request options. 
-// -// See UpdateKeyDescription for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdateKeyDescriptionWithContext(ctx aws.Context, input *UpdateKeyDescriptionInput, opts ...request.Option) (*UpdateKeyDescriptionOutput, error) { - req, out := c.UpdateKeyDescriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdatePrimaryRegion = "UpdatePrimaryRegion" - -// UpdatePrimaryRegionRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePrimaryRegion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdatePrimaryRegion for more information on using the UpdatePrimaryRegion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdatePrimaryRegionRequest method. -// req, resp := client.UpdatePrimaryRegionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegion -func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req *request.Request, output *UpdatePrimaryRegionOutput) { - op := &request.Operation{ - Name: opUpdatePrimaryRegion, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdatePrimaryRegionInput{} - } - - output = &UpdatePrimaryRegionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdatePrimaryRegion API operation for AWS Key Management Service. -// -// Changes the primary key of a multi-Region key. -// -// This operation changes the replica key in the specified Region to a primary -// key and changes the former primary key to a replica key. For example, suppose -// you have a primary key in us-east-1 and a replica key in eu-west-2. If you -// run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary -// key is now the key in eu-west-2, and the key in us-east-1 becomes a replica -// key. For details, see Updating the primary Region (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update) -// in the Key Management Service Developer Guide. -// -// This operation supports multi-Region keys, an KMS feature that lets you create -// multiple interoperable KMS keys in different Amazon Web Services Regions. -// Because these KMS keys have the same key ID, key material, and other metadata, -// you can use them interchangeably to encrypt data in one Amazon Web Services -// Region and decrypt it in a different Amazon Web Services Region without re-encrypting -// the data or making a cross-Region call. 
For more information about multi-Region -// keys, see Using multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. -// -// The primary key of a multi-Region key is the source for properties that are -// always shared by primary and replica keys, including the key material, key -// ID (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id), -// key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), -// key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), -// key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), -// and automatic key rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). -// It's the only key that can be replicated. You cannot delete the primary key -// (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html) -// until all replica keys are deleted. -// -// The key ID and primary Region that you specify uniquely identify the replica -// key that will become the primary key. The primary Region must already have -// a replica key. This operation does not create a KMS key in the specified -// Region. To find the replica keys, use the DescribeKey operation on the primary -// key or any replica key. To create a replica key, use the ReplicateKey operation. -// -// You can run this operation while using the affected multi-Region keys in -// cryptographic operations. This operation should not delay, interrupt, or -// cause failures in cryptographic operations. -// -// Even after this operation completes, the process of updating the primary -// Region might still be in progress for a few more seconds. Operations such -// as DescribeKey might display both the old and new primary keys as replicas. -// The old and new primary keys have a transient key state of Updating. The -// original key state is restored when the update is complete. While the key -// state is Updating, you can use the keys in cryptographic operations, but -// you cannot replicate the new primary key or perform certain management operations, -// such as enabling or disabling these keys. For details about the Updating -// key state, see Key state: Effect on your KMS key (kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// This operation does not return any output. To verify that primary key is -// changed, use the DescribeKey operation. -// -// Cross-account use: No. You cannot use this operation in a different Amazon -// Web Services account. -// -// Required permissions: -// -// * kms:UpdatePrimaryRegion on the current primary key (in the primary key's -// Region). Include this permission primary key's key policy. -// -// * kms:UpdatePrimaryRegion on the current replica key (in the replica key's -// Region). Include this permission in the replica key's key policy. -// -// Related operations -// -// * CreateKey -// -// * ReplicateKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdatePrimaryRegion for usage and error information. 
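Before the error list, a hedged sketch that ties together the UpdatePrimaryRegion description above and the generic *WithContext contract documented throughout this hunk (a non-nil context used for request cancellation). The eu-west-2 value comes from the doc's own example; the session region and key ID are placeholder assumptions:

```
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := kms.New(sess)

	// The *WithContext variants require a non-nil context; it is used for
	// request cancellation, as the removed doc comments note.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Promote the replica in eu-west-2 to primary. The key ID is a
	// placeholder; multi-Region key IDs begin with "mrk-".
	_, err := client.UpdatePrimaryRegionWithContext(ctx, &kms.UpdatePrimaryRegionInput{
		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"),
		PrimaryRegion: aws.String("eu-west-2"),
	})
	if err != nil {
		log.Fatalf("UpdatePrimaryRegion failed: %v", err)
	}
}
```

Per the removed text, the call returns no output; DescribeKey is the way to confirm that the primary key changed.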
-// -// Returned Error Types: -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegion -func (c *KMS) UpdatePrimaryRegion(input *UpdatePrimaryRegionInput) (*UpdatePrimaryRegionOutput, error) { - req, out := c.UpdatePrimaryRegionRequest(input) - return out, req.Send() -} - -// UpdatePrimaryRegionWithContext is the same as UpdatePrimaryRegion with the addition of -// the ability to pass a context and additional request options. -// -// See UpdatePrimaryRegion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdatePrimaryRegionWithContext(ctx aws.Context, input *UpdatePrimaryRegionInput, opts ...request.Option) (*UpdatePrimaryRegionOutput, error) { - req, out := c.UpdatePrimaryRegionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opVerify = "Verify" - -// VerifyRequest generates a "aws/request.Request" representing the -// client's request for the Verify operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Verify for more information on using the Verify -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the VerifyRequest method. -// req, resp := client.VerifyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify -func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *VerifyOutput) { - op := &request.Operation{ - Name: opVerify, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &VerifyInput{} - } - - output = &VerifyOutput{} - req = c.newRequest(op, input, output) - return -} - -// Verify API operation for AWS Key Management Service. 
-// -// Verifies a digital signature that was generated by the Sign operation. -// -// Verification confirms that an authorized user signed the message with the -// specified KMS key and signing algorithm, and the message hasn't changed since -// it was signed. If the signature is verified, the value of the SignatureValid -// field in the response is True. If the signature verification fails, the Verify -// operation fails with an KMSInvalidSignatureException exception. -// -// A digital signature is generated by using the private key in an asymmetric -// KMS key. The signature is verified by using the public key in the same asymmetric -// KMS key. For information about symmetric and asymmetric KMS keys, see Using -// Symmetric and Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// To verify a digital signature, you can use the Verify operation. Specify -// the same asymmetric KMS key, message, and signing algorithm that were used -// to produce the signature. -// -// You can also verify the digital signature by using the public key of the -// KMS key outside of KMS. Use the GetPublicKey operation to download the public -// key in the asymmetric KMS key and then use the public key to verify the signature -// outside of KMS. The advantage of using the Verify operation is that it is -// performed within KMS. As a result, it's easy to call, the operation is performed -// within the FIPS boundary, it is logged in CloudTrail, and you can use key -// policy and IAM policy to determine who is authorized to use the KMS key to -// verify signatures. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Verify (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: Sign -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Verify for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). 
-// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * KMSInvalidSignatureException -// The request was rejected because the signature verification failed. Signature -// verification fails when it cannot confirm that signature was produced by -// signing the specified message with the specified KMS key and signing algorithm. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify -func (c *KMS) Verify(input *VerifyInput) (*VerifyOutput, error) { - req, out := c.VerifyRequest(input) - return out, req.Send() -} - -// VerifyWithContext is the same as Verify with the addition of -// the ability to pass a context and additional request options. -// -// See Verify for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) VerifyWithContext(ctx aws.Context, input *VerifyInput, opts ...request.Option) (*VerifyOutput, error) { - req, out := c.VerifyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Contains information about an alias. -type AliasListEntry struct { - _ struct{} `type:"structure"` - - // String that contains the key ARN. - AliasArn *string `min:"20" type:"string"` - - // String that contains the alias. This value begins with alias/. - AliasName *string `min:"1" type:"string"` - - // Date and time that the alias was most recently created in the account and - // Region. Formatted as Unix time. - CreationDate *time.Time `type:"timestamp"` - - // Date and time that the alias was most recently associated with a KMS key - // in the account and Region. Formatted as Unix time. - LastUpdatedDate *time.Time `type:"timestamp"` - - // String that contains the key identifier of the KMS key associated with the - // alias. - TargetKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AliasListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s AliasListEntry) GoString() string { - return s.String() -} - -// SetAliasArn sets the AliasArn field's value. -func (s *AliasListEntry) SetAliasArn(v string) *AliasListEntry { - s.AliasArn = &v - return s -} - -// SetAliasName sets the AliasName field's value. -func (s *AliasListEntry) SetAliasName(v string) *AliasListEntry { - s.AliasName = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *AliasListEntry) SetCreationDate(v time.Time) *AliasListEntry { - s.CreationDate = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *AliasListEntry) SetLastUpdatedDate(v time.Time) *AliasListEntry { - s.LastUpdatedDate = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *AliasListEntry) SetTargetKeyId(v string) *AliasListEntry { - s.TargetKeyId = &v - return s -} - -// The request was rejected because it attempted to create a resource that already -// exists. -type AlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { - return &AlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *AlreadyExistsException) Code() string { - return "AlreadyExistsException" -} - -// Message returns the exception's message. -func (s *AlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *AlreadyExistsException) OrigErr() error { - return nil -} - -func (s *AlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *AlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *AlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -type CancelKeyDeletionInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key whose deletion is being canceled. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CancelKeyDeletionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelKeyDeletionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *CancelKeyDeletionInput) SetKeyId(v string) *CancelKeyDeletionInput { - s.KeyId = &v - return s -} - -type CancelKeyDeletionOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is canceled. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput { - s.KeyId = &v - return s -} - -// The request was rejected because the specified CloudHSM cluster is already -// associated with a custom key store or it shares a backup history with a cluster -// that is associated with a custom key store. Each custom key store must be -// associated with a different CloudHSM cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -type CloudHsmClusterInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
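The CancelKeyDeletion input and output types above back a single client call. A minimal sketch of how callers typically drove the v1 client that this deleted file implements, assuming credentials and region come from the shared config and using a placeholder key ID:

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Region and credentials are resolved from the environment / shared config.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := kms.New(sess)

	// KeyId takes a key ID or key ARN; this value is a placeholder.
	out, err := svc.CancelKeyDeletion(&kms.CancelKeyDeletionInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deletion canceled for", aws.StringValue(out.KeyId))
}
```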
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInUseException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterInUseException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterInUseException) Code() string { - return "CloudHsmClusterInUseException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterInUseException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . 
-type CloudHsmClusterInvalidConfigurationException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInvalidConfigurationException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInvalidConfigurationException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterInvalidConfigurationException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterInvalidConfigurationException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterInvalidConfigurationException) Code() string { - return "CloudHsmClusterInvalidConfigurationException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterInvalidConfigurationException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterInvalidConfigurationException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterInvalidConfigurationException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterInvalidConfigurationException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -type CloudHsmClusterNotActiveException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotActiveException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s CloudHsmClusterNotActiveException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotActiveException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotActiveException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterNotActiveException) Code() string { - return "CloudHsmClusterNotActiveException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotActiveException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotActiveException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotActiveException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotActiveException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotActiveException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because KMS cannot find the CloudHSM cluster with -// the specified cluster ID. Retry the request with a different cluster ID. -type CloudHsmClusterNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotFoundException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotFoundException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterNotFoundException) Code() string { - return "CloudHsmClusterNotFoundException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotFoundException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified CloudHSM cluster has a different -// cluster certificate than the original cluster. You cannot use the operation -// to specify an unrelated cluster. 
-// -// Specify a cluster that shares a backup history with the original cluster. -// This includes clusters that were created from a backup of the current cluster, -// and clusters that were created from the same backup that produced the current -// cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -type CloudHsmClusterNotRelatedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotRelatedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotRelatedException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotRelatedException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotRelatedException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterNotRelatedException) Code() string { - return "CloudHsmClusterNotRelatedException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotRelatedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotRelatedException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotRelatedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotRelatedException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotRelatedException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ConnectCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Enter the key store ID of the custom key store that you want to connect. - // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ConnectCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConnectCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConnectCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *ConnectCustomKeyStoreInput) SetCustomKeyStoreId(v string) *ConnectCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type ConnectCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type CreateAliasInput struct { - _ struct{} `type:"structure"` - - // Specifies the alias name. This value must begin with alias/ followed by a - // name, such as alias/ExampleAlias. - // - // The AliasName value must be string of 1-256 characters. It can contain only - // alphanumeric characters, forward slashes (/), underscores (_), and dashes - // (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is - // reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). - // - // AliasName is a required field - AliasName *string `min:"1" type:"string" required:"true"` - - // Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). - // The KMS key must be in the same Amazon Web Services Region. - // - // A valid key ID is required. If you supply a null or empty string value, this - // operation returns an error. - // - // For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) - // in the Key Management Service Developer Guide . - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // TargetKeyId is a required field - TargetKeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
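CreateAliasInput, documented above, needs an alias/-prefixed name (not alias/aws/) and the ID or ARN of a customer managed key in the same Region. A sketch of the corresponding call against the v1 client being removed here; the alias name and key ID are placeholders:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	})))

	// AliasName must begin with "alias/"; TargetKeyId is a key ID or key ARN.
	_, err := svc.CreateAlias(&kms.CreateAliasInput{
		AliasName:   aws.String("alias/example-key"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```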
-func (s CreateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.TargetKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) - } - if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasName sets the AliasName field's value. -func (s *CreateAliasInput) SetAliasName(v string) *CreateAliasInput { - s.AliasName = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *CreateAliasInput) SetTargetKeyId(v string) *CreateAliasInput { - s.TargetKeyId = &v - return s -} - -type CreateAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasOutput) GoString() string { - return s.String() -} - -type CreateCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Identifies the CloudHSM cluster for the custom key store. Enter the cluster - // ID of any active CloudHSM cluster that is not already associated with a custom - // key store. To find the cluster ID, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - // - // CloudHsmClusterId is a required field - CloudHsmClusterId *string `min:"19" type:"string" required:"true"` - - // Specifies a friendly name for the custom key store. The name must be unique - // in your Amazon Web Services account. - // - // CustomKeyStoreName is a required field - CustomKeyStoreName *string `min:"1" type:"string" required:"true"` - - // Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) - // in the specified CloudHSM cluster. KMS logs into the cluster as this user - // to manage key material on your behalf. - // - // The password must be a string of 7 to 32 characters. Its value is case sensitive. - // - // This parameter tells KMS the kmsuser account password; it does not change - // the password in the CloudHSM cluster. 
- // - // KeyStorePassword is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateCustomKeyStoreInput's - // String and GoString methods. - // - // KeyStorePassword is a required field - KeyStorePassword *string `min:"7" type:"string" required:"true" sensitive:"true"` - - // Enter the content of the trust anchor certificate for the cluster. This is - // the content of the customerCA.crt file that you created when you initialized - // the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html). - // - // TrustAnchorCertificate is a required field - TrustAnchorCertificate *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCustomKeyStoreInput"} - if s.CloudHsmClusterId == nil { - invalidParams.Add(request.NewErrParamRequired("CloudHsmClusterId")) - } - if s.CloudHsmClusterId != nil && len(*s.CloudHsmClusterId) < 19 { - invalidParams.Add(request.NewErrParamMinLen("CloudHsmClusterId", 19)) - } - if s.CustomKeyStoreName == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreName")) - } - if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1)) - } - if s.KeyStorePassword == nil { - invalidParams.Add(request.NewErrParamRequired("KeyStorePassword")) - } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) - } - if s.TrustAnchorCertificate == nil { - invalidParams.Add(request.NewErrParamRequired("TrustAnchorCertificate")) - } - if s.TrustAnchorCertificate != nil && len(*s.TrustAnchorCertificate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TrustAnchorCertificate", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *CreateCustomKeyStoreInput) SetCloudHsmClusterId(v string) *CreateCustomKeyStoreInput { - s.CloudHsmClusterId = &v - return s -} - -// SetCustomKeyStoreName sets the CustomKeyStoreName field's value. -func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreName(v string) *CreateCustomKeyStoreInput { - s.CustomKeyStoreName = &v - return s -} - -// SetKeyStorePassword sets the KeyStorePassword field's value. -func (s *CreateCustomKeyStoreInput) SetKeyStorePassword(v string) *CreateCustomKeyStoreInput { - s.KeyStorePassword = &v - return s -} - -// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value. 
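The four required CreateCustomKeyStoreInput fields above (CloudHSM cluster ID, store name, kmsuser password, trust anchor certificate) map onto one call. A sketch with placeholder identifiers; the password comes from a hypothetical environment variable and the certificate from the customerCA.crt produced when the cluster was initialized:

```
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	})))

	// Trust anchor certificate created when the CloudHSM cluster was initialized.
	trustAnchor, err := os.ReadFile("customerCA.crt")
	if err != nil {
		log.Fatal(err)
	}

	out, err := svc.CreateCustomKeyStore(&kms.CreateCustomKeyStoreInput{
		CloudHsmClusterId:      aws.String("cluster-1a23b4cdefg"),
		CustomKeyStoreName:     aws.String("ExampleKeyStore"),
		KeyStorePassword:       aws.String(os.Getenv("KMSUSER_PASSWORD")),
		TrustAnchorCertificate: aws.String(string(trustAnchor)),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("custom key store:", aws.StringValue(out.CustomKeyStoreId))
}
```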
-func (s *CreateCustomKeyStoreInput) SetTrustAnchorCertificate(v string) *CreateCustomKeyStoreInput { - s.TrustAnchorCertificate = &v - return s -} - -type CreateCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` - - // A unique identifier for the new custom key store. - CustomKeyStoreId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreOutput) GoString() string { - return s.String() -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *CreateCustomKeyStoreOutput) SetCustomKeyStoreId(v string) *CreateCustomKeyStoreOutput { - s.CustomKeyStoreId = &v - return s -} - -type CreateGrantInput struct { - _ struct{} `type:"structure"` - - // Specifies a grant constraint. - // - // KMS supports the EncryptionContextEquals and EncryptionContextSubset grant - // constraints. Each constraint value can include up to 8 encryption context - // pairs. The encryption context value in each constraint cannot exceed 384 - // characters. - // - // These grant constraints allow the permissions in the grant only when the - // encryption context in the request matches (EncryptionContextEquals) or includes - // (EncryptionContextSubset) the encryption context specified in this structure. - // For information about grant constraints, see Using grant constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) - // in the Key Management Service Developer Guide. For more information about - // encryption context, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide . - // - // The encryption context grant constraints are supported only on operations - // that include an encryption context. You cannot use an encryption context - // grant constraint for cryptographic operations with asymmetric KMS keys or - // for management operations, such as DescribeKey or RetireGrant. - Constraints *GrantConstraints `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // The identity that gets the permissions specified in the grant. - // - // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. 
Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, IAM roles, federated - // users, and assumed role users. For examples of the ARN syntax to use for - // specifying a principal, see Amazon Web Services Identity and Access Management - // (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // GranteePrincipal is a required field - GranteePrincipal *string `min:"1" type:"string" required:"true"` - - // Identifies the KMS key for the grant. The grant gives principals permission - // to use this KMS key. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // A friendly name for the grant. Use this value to prevent the unintended creation - // of duplicate grants when retrying this request. - // - // When this value is absent, all CreateGrant requests result in a new grant - // with a unique GrantId even if all the supplied parameters are identical. - // This can result in unintended duplicates when you retry the CreateGrant request. - // - // When this value is present, you can retry a CreateGrant request with identical - // parameters; if the grant already exists, the original GrantId is returned - // without creating a new grant. Note that the returned grant token is unique - // with every CreateGrant request, even when a duplicate GrantId is returned. - // All grant tokens for the same grant ID can be used interchangeably. - Name *string `min:"1" type:"string"` - - // A list of operations that the grant permits. - // - // The operation must be supported on the KMS key. For example, you cannot create - // a grant for a symmetric KMS key that allows the Sign operation, or a grant - // for an asymmetric KMS key that allows the GenerateDataKey operation. If you - // try, KMS returns a ValidationError exception. For details, see Grant operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) - // in the Key Management Service Developer Guide. - // - // Operations is a required field - Operations []*string `type:"list" required:"true"` - - // The principal that has permission to use the RetireGrant operation to retire - // the grant. - // - // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, federated users, - // and assumed role users. For examples of the ARN syntax to use for specifying - // a principal, see Amazon Web Services Identity and Access Management (IAM) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // The grant determines the retiring principal. Other principals might have - // permission to retire the grant or revoke the grant. 
For details, see RevokeGrant - // and Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) - // in the Key Management Service Developer Guide. - RetiringPrincipal *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGrantInput"} - if s.GranteePrincipal == nil { - invalidParams.Add(request.NewErrParamRequired("GranteePrincipal")) - } - if s.GranteePrincipal != nil && len(*s.GranteePrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GranteePrincipal", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Operations == nil { - invalidParams.Add(request.NewErrParamRequired("Operations")) - } - if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConstraints sets the Constraints field's value. -func (s *CreateGrantInput) SetConstraints(v *GrantConstraints) *CreateGrantInput { - s.Constraints = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *CreateGrantInput) SetGrantTokens(v []*string) *CreateGrantInput { - s.GrantTokens = v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. -func (s *CreateGrantInput) SetGranteePrincipal(v string) *CreateGrantInput { - s.GranteePrincipal = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *CreateGrantInput) SetKeyId(v string) *CreateGrantInput { - s.KeyId = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGrantInput) SetName(v string) *CreateGrantInput { - s.Name = &v - return s -} - -// SetOperations sets the Operations field's value. -func (s *CreateGrantInput) SetOperations(v []*string) *CreateGrantInput { - s.Operations = v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *CreateGrantInput) SetRetiringPrincipal(v string) *CreateGrantInput { - s.RetiringPrincipal = &v - return s -} - -type CreateGrantOutput struct { - _ struct{} `type:"structure"` - - // The unique identifier for the grant. - // - // You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation. - GrantId *string `min:"1" type:"string"` - - // The grant token. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantOutput) GoString() string { - return s.String() -} - -// SetGrantId sets the GrantId field's value. -func (s *CreateGrantOutput) SetGrantId(v string) *CreateGrantOutput { - s.GrantId = &v - return s -} - -// SetGrantToken sets the GrantToken field's value. -func (s *CreateGrantOutput) SetGrantToken(v string) *CreateGrantOutput { - s.GrantToken = &v - return s -} - -type CreateKeyInput struct { - _ struct{} `type:"structure"` - - // A flag to indicate whether to bypass the key policy lockout safety check. - // - // Setting this value to true increases the risk that the KMS key becomes unmanageable. - // Do not set this value to true indiscriminately. - // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide . - // - // Use this parameter only when you include a policy in the request and you - // intend to prevent the principal that is making the request from making a - // subsequent PutKeyPolicy request on the KMS key. - // - // The default value is false. - BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` - - // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) - // and the key material in its associated CloudHSM cluster. To create a KMS - // key in a custom key store, you must also specify the Origin parameter with - // a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the - // custom key store must have at least two active HSMs, each in a different - // Availability Zone in the Region. - // - // This parameter is valid only for symmetric KMS keys and regional KMS keys. - // You cannot create an asymmetric KMS key or a multi-Region key in a custom - // key store. - // - // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - // - // The response includes the custom key store ID and the ID of the CloudHSM - // cluster. - // - // This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) - // feature in KMS, which combines the convenience and extensive integration - // of KMS with the isolation and control of a single-tenant key store. - CustomKeyStoreId *string `min:"1" type:"string"` - - // Instead, use the KeySpec parameter. - // - // The KeySpec and CustomerMasterKeySpec parameters work the same way. Only - // the names differ. 
We recommend that you use KeySpec parameter in your code. - // However, to avoid breaking changes, KMS will support both parameters. - // - // Deprecated: This parameter has been deprecated. Instead, use the KeySpec parameter. - CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"` - - // A description of the KMS key. - // - // Use a description that helps you decide whether the KMS key is appropriate - // for a task. The default value is an empty string (no description). - // - // To set or change the description after the key is created, use UpdateKeyDescription. - Description *string `type:"string"` - - // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, - // creates a KMS key with a 256-bit symmetric key for encryption and decryption. - // For help choosing a key spec for your KMS key, see How to Choose Your KMS - // key Configuration (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html) - // in the Key Management Service Developer Guide . - // - // The KeySpec determines whether the KMS key contains a symmetric key or an - // asymmetric key pair. It also determines the encryption algorithms or signing - // algorithms that the KMS key supports. You can't change the KeySpec after - // the KMS key is created. To further restrict the algorithms that can be used - // with the KMS key, use a condition key in its key policy or IAM policy. For - // more information, see kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm) - // or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) - // in the Key Management Service Developer Guide . - // - // Amazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration) - // use symmetric KMS keys to protect your data. These services do not support - // asymmetric KMS keys. For help determining whether a KMS key is symmetric - // or asymmetric, see Identifying Symmetric and Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/find-symm-asymm.html) - // in the Key Management Service Developer Guide. - // - // KMS supports the following key specs for KMS keys: - // - // * Symmetric key (default) SYMMETRIC_DEFAULT (AES-256-GCM) - // - // * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 - // - // * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) - // ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) - // - // * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1), - // commonly used for cryptocurrencies. - KeySpec *string `type:"string" enum:"KeySpec"` - - // Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. - // This parameter is required only for asymmetric KMS keys. You can't change - // the KeyUsage value after the KMS key is created. - // - // Select only one valid value. - // - // * For symmetric KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. - // - // * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT - // or SIGN_VERIFY. - // - // * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY. 
- KeyUsage *string `type:"string" enum:"KeyUsageType"` - - // Creates a multi-Region primary key that you can replicate into other Amazon - // Web Services Regions. You cannot change this value after you create the KMS - // key. - // - // For a multi-Region key, set this parameter to True. For a single-Region KMS - // key, omit this parameter or set it to False. The default value is False. - // - // This operation supports multi-Region keys, an KMS feature that lets you create - // multiple interoperable KMS keys in different Amazon Web Services Regions. - // Because these KMS keys have the same key ID, key material, and other metadata, - // you can use them interchangeably to encrypt data in one Amazon Web Services - // Region and decrypt it in a different Amazon Web Services Region without re-encrypting - // the data or making a cross-Region call. For more information about multi-Region - // keys, see Using multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) - // in the Key Management Service Developer Guide. - // - // This value creates a primary key, not a replica. To create a replica key, - // use the ReplicateKey operation. - // - // You can create a symmetric or asymmetric multi-Region key, and you can create - // a multi-Region key with imported key material. However, you cannot create - // a multi-Region key in a custom key store. - MultiRegion *bool `type:"boolean"` - - // The source of the key material for the KMS key. You cannot change the origin - // after you create the KMS key. The default is AWS_KMS, which means that KMS - // creates the key material. - // - // To create a KMS key with no key material (for imported key material), set - // the value to EXTERNAL. For more information about importing key material - // into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) - // in the Key Management Service Developer Guide. This value is valid only for - // symmetric KMS keys. - // - // To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) - // and create its key material in the associated CloudHSM cluster, set this - // value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to - // identify the custom key store. This value is valid only for symmetric KMS - // keys. - Origin *string `type:"string" enum:"OriginType"` - - // The key policy to attach to the KMS key. - // - // If you provide a key policy, it must meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must allow the principal that is making the CreateKey request to make - // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk - // that the KMS key becomes unmanageable. For more information, refer to - // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . - // - // * Each statement in the key policy must contain one or more principals. - // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. 
For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Amazon Web Services Identity and Access Management User Guide. - // - // If you do not provide a key policy, KMS attaches a default key policy to - // the KMS key. For more information, see Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) - // in the Key Management Service Developer Guide. - // - // The key policy size quota is 32 kilobytes (32768 bytes). - // - // For help writing and formatting a JSON policy document, see the IAM JSON - // Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) - // in the Identity and Access Management User Guide . - Policy *string `min:"1" type:"string"` - - // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS - // key when it is created. To tag an existing KMS key, use the TagResource operation. - // - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. - // For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) - // in the Key Management Service Developer Guide. - // - // To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) - // permission in an IAM policy. - // - // Each tag consists of a tag key and a tag value. Both the tag key and the - // tag value are required, but the tag value can be an empty (null) string. - // You cannot have more than one tag on a KMS key with the same tag key. If - // you specify an existing tag key with a different tag value, KMS replaces - // the current tag value with the specified one. - // - // When you add tags to an Amazon Web Services resource, Amazon Web Services - // generates a cost allocation report with usage and costs aggregated by tags. - // Tags can also be used to control access to a KMS key. For details, see Tagging - // Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateKeyInput"} - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value. -func (s *CreateKeyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *CreateKeyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *CreateKeyInput) SetCustomKeyStoreId(v string) *CreateKeyInput { - s.CustomKeyStoreId = &v - return s -} - -// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value. -func (s *CreateKeyInput) SetCustomerMasterKeySpec(v string) *CreateKeyInput { - s.CustomerMasterKeySpec = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateKeyInput) SetDescription(v string) *CreateKeyInput { - s.Description = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *CreateKeyInput) SetKeySpec(v string) *CreateKeyInput { - s.KeySpec = &v - return s -} - -// SetKeyUsage sets the KeyUsage field's value. -func (s *CreateKeyInput) SetKeyUsage(v string) *CreateKeyInput { - s.KeyUsage = &v - return s -} - -// SetMultiRegion sets the MultiRegion field's value. -func (s *CreateKeyInput) SetMultiRegion(v bool) *CreateKeyInput { - s.MultiRegion = &v - return s -} - -// SetOrigin sets the Origin field's value. -func (s *CreateKeyInput) SetOrigin(v string) *CreateKeyInput { - s.Origin = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *CreateKeyInput) SetPolicy(v string) *CreateKeyInput { - s.Policy = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateKeyInput) SetTags(v []*Tag) *CreateKeyInput { - s.Tags = v - return s -} - -type CreateKeyOutput struct { - _ struct{} `type:"structure"` - - // Metadata associated with the KMS key. - KeyMetadata *KeyMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyOutput) GoString() string { - return s.String() -} - -// SetKeyMetadata sets the KeyMetadata field's value. -func (s *CreateKeyOutput) SetKeyMetadata(v *KeyMetadata) *CreateKeyOutput { - s.KeyMetadata = v - return s -} - -// The request was rejected because the custom key store contains KMS keys. -// After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion -// operation to delete the KMS keys. 
After they are deleted, you can delete -// the custom key store. -type CustomKeyStoreHasCMKsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreHasCMKsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreHasCMKsException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreHasCMKsException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreHasCMKsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreHasCMKsException) Code() string { - return "CustomKeyStoreHasCMKsException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreHasCMKsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreHasCMKsException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreHasCMKsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreHasCMKsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreHasCMKsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -type CustomKeyStoreInvalidStateException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreInvalidStateException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreInvalidStateException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreInvalidStateException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreInvalidStateException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreInvalidStateException) Code() string { - return "CustomKeyStoreInvalidStateException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreInvalidStateException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreInvalidStateException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreInvalidStateException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreInvalidStateException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreInvalidStateException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified custom key store name is already -// assigned to another custom key store in the account. Try again with a custom -// key store name that is unique in the account. -type CustomKeyStoreNameInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNameInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNameInUseException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreNameInUseException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreNameInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreNameInUseException) Code() string { - return "CustomKeyStoreNameInUseException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreNameInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreNameInUseException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreNameInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreNameInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. 
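Callers typically branch on these typed errors through their Code() values; a rough, illustrative sketch (the helper name, client, and messages are assumptions) of handling them around DeleteCustomKeyStore:

```
package kmsexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kms"
)

// deleteKeyStore is an illustrative helper showing the usual v1 pattern of
// switching on awserr.Error codes; the strings match the Code() methods
// generated for the exception types above. svc is an assumed, configured client.
func deleteKeyStore(svc *kms.KMS, id string) error {
	_, err := svc.DeleteCustomKeyStore(&kms.DeleteCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(id),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "CustomKeyStoreHasCMKsException":
			return fmt.Errorf("key store %s still contains KMS keys; schedule them for deletion first: %w", id, err)
		case "CustomKeyStoreInvalidStateException":
			return fmt.Errorf("key store %s must be disconnected before deletion: %w", id, err)
		}
	}
	return err
}
```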
-func (s *CustomKeyStoreNameInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -type CustomKeyStoreNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNotFoundException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreNotFoundException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreNotFoundException) Code() string { - return "CustomKeyStoreNotFoundException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreNotFoundException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains information about each custom key store in the custom key store -// list. -type CustomKeyStoresListEntry struct { - _ struct{} `type:"structure"` - - // A unique identifier for the CloudHSM cluster that is associated with the - // custom key store. - CloudHsmClusterId *string `min:"19" type:"string"` - - // Describes the connection error. This field appears in the response only when - // the ConnectionState is FAILED. For help resolving these errors, see How to - // Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) - // in Key Management Service Developer Guide. - // - // Valid values are: - // - // * CLUSTER_NOT_FOUND - KMS cannot find the CloudHSM cluster with the specified - // cluster ID. - // - // * INSUFFICIENT_CLOUDHSM_HSMS - The associated CloudHSM cluster does not - // contain any active HSMs. To connect a custom key store to its CloudHSM - // cluster, the cluster must contain at least one active HSM. - // - // * INTERNAL_ERROR - KMS could not complete the request due to an internal - // error. Retry the request. For ConnectCustomKeyStore requests, disconnect - // the custom key store before trying to connect again. 
- //
- // * INVALID_CREDENTIALS - KMS does not have the correct password for the
- // kmsuser crypto user in the CloudHSM cluster. Before you can connect your
- // custom key store to its CloudHSM cluster, you must change the kmsuser
- // account password and update the key store password value for the custom
- // key store.
- //
- // * NETWORK_ERRORS - Network errors are preventing KMS from connecting to
- // the custom key store.
- //
- // * SUBNET_NOT_FOUND - A subnet in the CloudHSM cluster configuration was
- // deleted. If KMS cannot find all of the subnets in the cluster configuration,
- // attempts to connect the custom key store to the CloudHSM cluster fail.
- // To fix this error, create a cluster from a recent backup and associate
- // it with your custom key store. (This process creates a new cluster configuration
- // with a VPC and private subnets.) For details, see How to Fix a Connection
- // Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
- // in the Key Management Service Developer Guide.
- //
- // * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated
- // CloudHSM cluster due to too many failed password attempts. Before you
- // can connect your custom key store to its CloudHSM cluster, you must change
- // the kmsuser account password and update the key store password value for
- // the custom key store.
- //
- // * USER_LOGGED_IN - The kmsuser CU account is logged into the associated
- // CloudHSM cluster. This prevents KMS from rotating the kmsuser account
- // password and logging into the cluster. Before you can connect your custom
- // key store to its CloudHSM cluster, you must log the kmsuser CU out of
- // the cluster. If you changed the kmsuser password to log into the cluster,
- // you must also update the key store password value for the custom key
- // store. For help, see How to Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
- // in the Key Management Service Developer Guide.
- //
- // * USER_NOT_FOUND - KMS cannot find a kmsuser CU account in the associated
- // CloudHSM cluster. Before you can connect your custom key store to its
- // CloudHSM cluster, you must create a kmsuser CU account in the cluster,
- // and then update the key store password value for the custom key store.
- ConnectionErrorCode *string `type:"string" enum:"ConnectionErrorCodeType"`
-
- // Indicates whether the custom key store is connected to its CloudHSM cluster.
- //
- // You can create and use KMS keys in your custom key stores only when its connection
- // state is CONNECTED.
- //
- // The value is DISCONNECTED if the key store has never been connected or you
- // use the DisconnectCustomKeyStore operation to disconnect it. If the value
- // is CONNECTED but you are having trouble using the custom key store, make
- // sure that its associated CloudHSM cluster is active and contains at least
- // one active HSM.
- //
- // A value of FAILED indicates that an attempt to connect was unsuccessful.
- // The ConnectionErrorCode field in the response indicates the cause of the
- // failure. For help resolving a connection failure, see Troubleshooting a Custom
- // Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
- // in the Key Management Service Developer Guide.
- ConnectionState *string `type:"string" enum:"ConnectionStateType"`
-
- // The date and time when the custom key store was created.
- CreationDate *time.Time `type:"timestamp"` - - // A unique identifier for the custom key store. - CustomKeyStoreId *string `min:"1" type:"string"` - - // The user-specified friendly name for the custom key store. - CustomKeyStoreName *string `min:"1" type:"string"` - - // The trust anchor certificate of the associated CloudHSM cluster. When you - // initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), - // you create this certificate and save it in the customerCA.crt file. - TrustAnchorCertificate *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoresListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoresListEntry) GoString() string { - return s.String() -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *CustomKeyStoresListEntry) SetCloudHsmClusterId(v string) *CustomKeyStoresListEntry { - s.CloudHsmClusterId = &v - return s -} - -// SetConnectionErrorCode sets the ConnectionErrorCode field's value. -func (s *CustomKeyStoresListEntry) SetConnectionErrorCode(v string) *CustomKeyStoresListEntry { - s.ConnectionErrorCode = &v - return s -} - -// SetConnectionState sets the ConnectionState field's value. -func (s *CustomKeyStoresListEntry) SetConnectionState(v string) *CustomKeyStoresListEntry { - s.ConnectionState = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *CustomKeyStoresListEntry) SetCreationDate(v time.Time) *CustomKeyStoresListEntry { - s.CreationDate = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *CustomKeyStoresListEntry) SetCustomKeyStoreId(v string) *CustomKeyStoresListEntry { - s.CustomKeyStoreId = &v - return s -} - -// SetCustomKeyStoreName sets the CustomKeyStoreName field's value. -func (s *CustomKeyStoresListEntry) SetCustomKeyStoreName(v string) *CustomKeyStoresListEntry { - s.CustomKeyStoreName = &v - return s -} - -// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value. -func (s *CustomKeyStoresListEntry) SetTrustAnchorCertificate(v string) *CustomKeyStoresListEntry { - s.TrustAnchorCertificate = &v - return s -} - -type DecryptInput struct { - _ struct{} `type:"structure"` - - // Ciphertext to be decrypted. The blob includes metadata. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - // - // CiphertextBlob is a required field - CiphertextBlob []byte `min:"1" type:"blob" required:"true"` - - // Specifies the encryption algorithm that will be used to decrypt the ciphertext. - // Specify the same algorithm that was used to encrypt the data. If you specify - // a different algorithm, the Decrypt operation fails. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. The default value, SYMMETRIC_DEFAULT, represents the - // only supported algorithm that is valid for symmetric KMS keys. 
- EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context to use when decrypting the data. An encryption - // context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // with a symmetric KMS key. The standard asymmetric encryption algorithms that - // KMS uses do not support an encryption context. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the KMS key that KMS uses to decrypt the ciphertext. Enter a key - // ID of the KMS key that was used to encrypt the ciphertext. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. If you used a symmetric KMS key, KMS can get the KMS - // key from metadata that it adds to the symmetric ciphertext blob. However, - // it is always recommended as a best practice. This practice ensures that you - // use the KMS key that you intend. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecryptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
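A minimal, illustrative sketch of a Decrypt call built from the fields documented above; the client, key ARN, and encryption-context values are assumptions:

```
package kmsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// decrypt illustrates the v1 DecryptInput. For a symmetric KMS key the KeyId
// is optional (KMS reads it from the ciphertext metadata), but passing it pins
// the call to the key you intend. The encryption context must match the one
// supplied when the data was encrypted.
func decrypt(svc *kms.KMS, keyARN string, blob []byte) ([]byte, error) {
	out, err := svc.Decrypt(&kms.DecryptInput{
		CiphertextBlob:    blob,
		KeyId:             aws.String(keyARN),
		EncryptionContext: map[string]*string{"purpose": aws.String("example")},
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}
```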
-func (s DecryptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecryptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecryptInput"} - if s.CiphertextBlob == nil { - invalidParams.Add(request.NewErrParamRequired("CiphertextBlob")) - } - if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1)) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *DecryptInput) SetCiphertextBlob(v []byte) *DecryptInput { - s.CiphertextBlob = v - return s -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *DecryptInput) SetEncryptionAlgorithm(v string) *DecryptInput { - s.EncryptionAlgorithm = &v - return s -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *DecryptInput) SetEncryptionContext(v map[string]*string) *DecryptInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *DecryptInput) SetGrantTokens(v []*string) *DecryptInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *DecryptInput) SetKeyId(v string) *DecryptInput { - s.KeyId = &v - return s -} - -type DecryptOutput struct { - _ struct{} `type:"structure"` - - // The encryption algorithm that was used to decrypt the ciphertext. - EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to decrypt the ciphertext. - KeyId *string `min:"1" type:"string"` - - // Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by DecryptOutput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - Plaintext []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecryptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecryptOutput) GoString() string { - return s.String() -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *DecryptOutput) SetEncryptionAlgorithm(v string) *DecryptOutput { - s.EncryptionAlgorithm = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *DecryptOutput) SetKeyId(v string) *DecryptOutput { - s.KeyId = &v - return s -} - -// SetPlaintext sets the Plaintext field's value. 
-func (s *DecryptOutput) SetPlaintext(v []byte) *DecryptOutput { - s.Plaintext = v - return s -} - -type DeleteAliasInput struct { - _ struct{} `type:"structure"` - - // The alias to be deleted. The alias name must begin with alias/ followed by - // the alias name, such as alias/ExampleAlias. - // - // AliasName is a required field - AliasName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasName sets the AliasName field's value. -func (s *DeleteAliasInput) SetAliasName(v string) *DeleteAliasInput { - s.AliasName = &v - return s -} - -type DeleteAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteAliasOutput) GoString() string { - return s.String() -} - -type DeleteCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Enter the ID of the custom key store you want to delete. To find the ID of - // a custom key store, use the DescribeCustomKeyStores operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *DeleteCustomKeyStoreInput) SetCustomKeyStoreId(v string) *DeleteCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type DeleteCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type DeleteImportedKeyMaterialInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key from which you are deleting imported key material. - // The Origin of the KMS key must be EXTERNAL. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteImportedKeyMaterialInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteImportedKeyMaterialInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *DeleteImportedKeyMaterialInput) SetKeyId(v string) *DeleteImportedKeyMaterialInput { - s.KeyId = &v - return s -} - -type DeleteImportedKeyMaterialOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialOutput) GoString() string { - return s.String() -} - -// The system timed out while trying to fulfill the request. The request can -// be retried. -type DependencyTimeoutException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DependencyTimeoutException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DependencyTimeoutException) GoString() string { - return s.String() -} - -func newErrorDependencyTimeoutException(v protocol.ResponseMetadata) error { - return &DependencyTimeoutException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *DependencyTimeoutException) Code() string { - return "DependencyTimeoutException" -} - -// Message returns the exception's message. -func (s *DependencyTimeoutException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *DependencyTimeoutException) OrigErr() error { - return nil -} - -func (s *DependencyTimeoutException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *DependencyTimeoutException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *DependencyTimeoutException) RequestID() string { - return s.RespMetadata.RequestID -} - -type DescribeCustomKeyStoresInput struct { - _ struct{} `type:"structure"` - - // Gets only information about the specified custom key store. Enter the key - // store ID. - // - // By default, this operation gets information about all custom key stores in - // the account and Region. To limit the output to a particular custom key store, - // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, - // but not both. - CustomKeyStoreId *string `min:"1" type:"string"` - - // Gets only information about the specified custom key store. Enter the friendly - // name of the custom key store. - // - // By default, this operation gets information about all custom key stores in - // the account and Region. To limit the output to a particular custom key store, - // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, - // but not both. 
- CustomKeyStoreName *string `min:"1" type:"string"`
-
- // Use this parameter to specify the maximum number of items to return. When
- // this value is present, KMS does not return more than the specified number
- // of items, but it might return fewer.
- Limit *int64 `min:"1" type:"integer"`
-
- // Use this parameter in a subsequent request after you receive a response with
- // truncated results. Set it to the value of NextMarker from the truncated response
- // you just received.
- Marker *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeCustomKeyStoresInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeCustomKeyStoresInput"}
- if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1))
- }
- if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
- if s.Marker != nil && len(*s.Marker) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetCustomKeyStoreId sets the CustomKeyStoreId field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreId(v string) *DescribeCustomKeyStoresInput {
- s.CustomKeyStoreId = &v
- return s
-}
-
-// SetCustomKeyStoreName sets the CustomKeyStoreName field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreName(v string) *DescribeCustomKeyStoresInput {
- s.CustomKeyStoreName = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *DescribeCustomKeyStoresInput) SetLimit(v int64) *DescribeCustomKeyStoresInput {
- s.Limit = &v
- return s
-}
-
-// SetMarker sets the Marker field's value.
-func (s *DescribeCustomKeyStoresInput) SetMarker(v string) *DescribeCustomKeyStoresInput {
- s.Marker = &v
- return s
-}
-
-type DescribeCustomKeyStoresOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains metadata about each custom key store.
- CustomKeyStores []*CustomKeyStoresListEntry `type:"list"`
-
- // When Truncated is true, this element is present and contains the value to
- // use for the Marker parameter in a subsequent request.
- NextMarker *string `min:"1" type:"string"`
-
- // A flag that indicates whether there are more items in the list. When this
- // value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in this response to the Marker parameter
- // in a subsequent request.
- Truncated *bool `type:"boolean"`
-}
-
-// String returns the string representation.
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeCustomKeyStoresOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeCustomKeyStoresOutput) GoString() string { - return s.String() -} - -// SetCustomKeyStores sets the CustomKeyStores field's value. -func (s *DescribeCustomKeyStoresOutput) SetCustomKeyStores(v []*CustomKeyStoresListEntry) *DescribeCustomKeyStoresOutput { - s.CustomKeyStores = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *DescribeCustomKeyStoresOutput) SetNextMarker(v string) *DescribeCustomKeyStoresOutput { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *DescribeCustomKeyStoresOutput) SetTruncated(v bool) *DescribeCustomKeyStoresOutput { - s.Truncated = &v - return s -} - -type DescribeKeyInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Describes the specified KMS key. - // - // If you specify a predefined Amazon Web Services alias (an Amazon Web Services - // alias with no key ID), KMS associates the alias with an Amazon Web Services - // managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html##aws-managed-cmk) - // and returns its KeyId and Arn in the response. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
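The Truncated, NextMarker, and Marker fields above drive manual pagination in the v1 SDK; a sketch of that loop, with the Limit value and helper name chosen for illustration:

```
package kmsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// listKeyStores pages through DescribeCustomKeyStores by feeding each
// response's NextMarker back in as the next request's Marker until
// Truncated is no longer true. svc is an assumed, configured client.
func listKeyStores(svc *kms.KMS) ([]*kms.CustomKeyStoresListEntry, error) {
	var stores []*kms.CustomKeyStoresListEntry
	input := &kms.DescribeCustomKeyStoresInput{Limit: aws.Int64(10)}
	for {
		out, err := svc.DescribeCustomKeyStores(input)
		if err != nil {
			return nil, err
		}
		stores = append(stores, out.CustomKeyStores...)
		if out.Truncated == nil || !*out.Truncated {
			return stores, nil
		}
		input.Marker = out.NextMarker
	}
}
```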
-func (s DescribeKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *DescribeKeyInput) SetGrantTokens(v []*string) *DescribeKeyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *DescribeKeyInput) SetKeyId(v string) *DescribeKeyInput { - s.KeyId = &v - return s -} - -type DescribeKeyOutput struct { - _ struct{} `type:"structure"` - - // Metadata associated with the key. - KeyMetadata *KeyMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKeyOutput) GoString() string { - return s.String() -} - -// SetKeyMetadata sets the KeyMetadata field's value. -func (s *DescribeKeyOutput) SetKeyMetadata(v *KeyMetadata) *DescribeKeyOutput { - s.KeyMetadata = v - return s -} - -type DisableKeyInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key to disable. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisableKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. 
-func (s *DisableKeyInput) SetKeyId(v string) *DisableKeyInput { - s.KeyId = &v - return s -} - -type DisableKeyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyOutput) GoString() string { - return s.String() -} - -type DisableKeyRotationInput struct { - _ struct{} `type:"structure"` - - // Identifies a symmetric KMS key. You cannot enable or disable automatic rotation - // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks), - // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), - // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisableKeyRotationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableKeyRotationInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *DisableKeyRotationInput) SetKeyId(v string) *DisableKeyRotationInput { - s.KeyId = &v - return s -} - -type DisableKeyRotationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationOutput) GoString() string { - return s.String() -} - -// The request was rejected because the specified KMS key is not enabled. -type DisabledException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisabledException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisabledException) GoString() string { - return s.String() -} - -func newErrorDisabledException(v protocol.ResponseMetadata) error { - return &DisabledException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *DisabledException) Code() string { - return "DisabledException" -} - -// Message returns the exception's message. -func (s *DisabledException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *DisabledException) OrigErr() error { - return nil -} - -func (s *DisabledException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *DisabledException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *DisabledException) RequestID() string { - return s.RespMetadata.RequestID -} - -type DisconnectCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Enter the ID of the custom key store you want to disconnect. To find the - // ID of a custom key store, use the DescribeCustomKeyStores operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DisconnectCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisconnectCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *DisconnectCustomKeyStoreInput) SetCustomKeyStoreId(v string) *DisconnectCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type DisconnectCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type EnableKeyInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key to enable. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnableKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *EnableKeyInput) SetKeyId(v string) *EnableKeyInput { - s.KeyId = &v - return s -} - -type EnableKeyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s EnableKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyOutput) GoString() string { - return s.String() -} - -type EnableKeyRotationInput struct { - _ struct{} `type:"structure"` - - // Identifies a symmetric KMS key. You cannot enable automatic rotation of asymmetric - // KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks), - // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), - // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // To enable or disable automatic rotation of a set of related multi-Region - // keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key), - // set the property on the primary key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnableKeyRotationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableKeyRotationInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *EnableKeyRotationInput) SetKeyId(v string) *EnableKeyRotationInput { - s.KeyId = &v - return s -} - -type EnableKeyRotationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s EnableKeyRotationOutput) GoString() string { - return s.String() -} - -type EncryptInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption algorithm that KMS will use to encrypt the plaintext - // message. The algorithm must be compatible with the KMS key that you specify. - // - // This parameter is required only for asymmetric KMS keys. The default value, - // SYMMETRIC_DEFAULT, is the algorithm used for symmetric KMS keys. If you are - // using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. - EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context that will be used to encrypt the data. An - // encryption context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // with a symmetric KMS key. The standard asymmetric encryption algorithms that - // KMS uses do not support an encryption context. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the KMS key to use in the encryption operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Data to be encrypted. - // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by EncryptInput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - // - // Plaintext is a required field - Plaintext []byte `min:"1" type:"blob" required:"true" sensitive:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Plaintext == nil { - invalidParams.Add(request.NewErrParamRequired("Plaintext")) - } - if s.Plaintext != nil && len(s.Plaintext) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Plaintext", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *EncryptInput) SetEncryptionAlgorithm(v string) *EncryptInput { - s.EncryptionAlgorithm = &v - return s -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *EncryptInput) SetEncryptionContext(v map[string]*string) *EncryptInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *EncryptInput) SetGrantTokens(v []*string) *EncryptInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *EncryptInput) SetKeyId(v string) *EncryptInput { - s.KeyId = &v - return s -} - -// SetPlaintext sets the Plaintext field's value. -func (s *EncryptInput) SetPlaintext(v []byte) *EncryptInput { - s.Plaintext = v - return s -} - -type EncryptOutput struct { - _ struct{} `type:"structure"` - - // The encrypted plaintext. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The encryption algorithm that was used to encrypt the plaintext. - EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to encrypt the plaintext. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. 
-func (s *EncryptOutput) SetCiphertextBlob(v []byte) *EncryptOutput { - s.CiphertextBlob = v - return s -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *EncryptOutput) SetEncryptionAlgorithm(v string) *EncryptOutput { - s.EncryptionAlgorithm = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *EncryptOutput) SetKeyId(v string) *EncryptOutput { - s.KeyId = &v - return s -} - -// The request was rejected because the specified import token is expired. Use -// GetParametersForImport to get a new import token and public key, use the -// new public key to encrypt the key material, and then try the request again. -type ExpiredImportTokenException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExpiredImportTokenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExpiredImportTokenException) GoString() string { - return s.String() -} - -func newErrorExpiredImportTokenException(v protocol.ResponseMetadata) error { - return &ExpiredImportTokenException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ExpiredImportTokenException) Code() string { - return "ExpiredImportTokenException" -} - -// Message returns the exception's message. -func (s *ExpiredImportTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ExpiredImportTokenException) OrigErr() error { - return nil -} - -func (s *ExpiredImportTokenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ExpiredImportTokenException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ExpiredImportTokenException) RequestID() string { - return s.RespMetadata.RequestID -} - -type GenerateDataKeyInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the data - // key. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. 
- // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the symmetric KMS key that encrypts the data key. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the length of the data key. Use AES_128 to generate a 128-bit symmetric - // key, or AES_256 to generate a 256-bit symmetric key. - // - // You must specify either the KeySpec or the NumberOfBytes parameter (but not - // both) in every GenerateDataKey request. - KeySpec *string `type:"string" enum:"DataKeySpec"` - - // Specifies the length of the data key in bytes. For example, use the value - // 64 to generate a 512-bit data key (64 bytes is 512 bits). For 128-bit (16-byte) - // and 256-bit (32-byte) data keys, use the KeySpec parameter. - // - // You must specify either the KeySpec or the NumberOfBytes parameter (but not - // both) in every GenerateDataKey request. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. 
-func (s *GenerateDataKeyInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyInput) SetGrantTokens(v []*string) *GenerateDataKeyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyInput) SetKeyId(v string) *GenerateDataKeyInput { - s.KeyId = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *GenerateDataKeyInput) SetKeySpec(v string) *GenerateDataKeyInput { - s.KeySpec = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateDataKeyInput) SetNumberOfBytes(v int64) *GenerateDataKeyInput { - s.NumberOfBytes = &v - return s -} - -type GenerateDataKeyOutput struct { - _ struct{} `type:"structure"` - - // The encrypted copy of the data key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. - KeyId *string `min:"1" type:"string"` - - // The plaintext data key. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. Use - // this data key to encrypt your data outside of KMS. Then, remove it from memory - // as soon as possible. - // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateDataKeyOutput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - Plaintext []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *GenerateDataKeyOutput) SetCiphertextBlob(v []byte) *GenerateDataKeyOutput { - s.CiphertextBlob = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyOutput) SetKeyId(v string) *GenerateDataKeyOutput { - s.KeyId = &v - return s -} - -// SetPlaintext sets the Plaintext field's value. -func (s *GenerateDataKeyOutput) SetPlaintext(v []byte) *GenerateDataKeyOutput { - s.Plaintext = v - return s -} - -type GenerateDataKeyPairInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. 
When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the symmetric KMS key that encrypts the private key in the data - // key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom - // key store. To get the type and origin of your KMS key, use the DescribeKey - // operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Determines the type of data key pair that is generated. - // - // The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt - // and decrypt or to sign and verify (but not both), and the rule that permits - // you to use ECC KMS keys only to sign and verify, are not effective on data - // key pairs, which are used outside of KMS. - // - // KeyPairSpec is a required field - KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GenerateDataKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyPairInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.KeyPairSpec == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairSpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyPairInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyPairInput) SetGrantTokens(v []*string) *GenerateDataKeyPairInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairInput) SetKeyId(v string) *GenerateDataKeyPairInput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairInput) SetKeyPairSpec(v string) *GenerateDataKeyPairInput { - s.KeyPairSpec = &v - return s -} - -type GenerateDataKeyPairOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. - KeyId *string `min:"1" type:"string"` - - // The type of data key pair that was generated. - KeyPairSpec *string `type:"string" enum:"DataKeyPairSpec"` - - // The encrypted copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PrivateKeyCiphertextBlob is automatically base64 encoded/decoded by the SDK. - PrivateKeyCiphertextBlob []byte `min:"1" type:"blob"` - - // The plaintext copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // - // PrivateKeyPlaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateDataKeyPairOutput's - // String and GoString methods. - // - // PrivateKeyPlaintext is automatically base64 encoded/decoded by the SDK. - PrivateKeyPlaintext []byte `min:"1" type:"blob" sensitive:"true"` - - // The public key (in plaintext). - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairOutput) SetKeyId(v string) *GenerateDataKeyPairOutput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. 
-func (s *GenerateDataKeyPairOutput) SetKeyPairSpec(v string) *GenerateDataKeyPairOutput { - s.KeyPairSpec = &v - return s -} - -// SetPrivateKeyCiphertextBlob sets the PrivateKeyCiphertextBlob field's value. -func (s *GenerateDataKeyPairOutput) SetPrivateKeyCiphertextBlob(v []byte) *GenerateDataKeyPairOutput { - s.PrivateKeyCiphertextBlob = v - return s -} - -// SetPrivateKeyPlaintext sets the PrivateKeyPlaintext field's value. -func (s *GenerateDataKeyPairOutput) SetPrivateKeyPlaintext(v []byte) *GenerateDataKeyPairOutput { - s.PrivateKeyPlaintext = v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GenerateDataKeyPairOutput) SetPublicKey(v []byte) *GenerateDataKeyPairOutput { - s.PublicKey = v - return s -} - -type GenerateDataKeyPairWithoutPlaintextInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the KMS key that encrypts the private key in the data key pair. - // You must specify a symmetric KMS key. You cannot use an asymmetric KMS key - // or a KMS key in a custom key store. To get the type and origin of your KMS - // key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Determines the type of data key pair that is generated. - // - // The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt - // and decrypt or to sign and verify (but not both), and the rule that permits - // you to use ECC KMS keys only to sign and verify, are not effective on data - // key pairs, which are used outside of KMS. 
- // - // KeyPairSpec is a required field - KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyPairWithoutPlaintextInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyPairWithoutPlaintextInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.KeyPairSpec == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairSpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairWithoutPlaintextInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetGrantTokens(v []*string) *GenerateDataKeyPairWithoutPlaintextInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyId(v string) *GenerateDataKeyPairWithoutPlaintextInput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyPairSpec(v string) *GenerateDataKeyPairWithoutPlaintextInput { - s.KeyPairSpec = &v - return s -} - -type GenerateDataKeyPairWithoutPlaintextOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. - KeyId *string `min:"1" type:"string"` - - // The type of data key pair that was generated. - KeyPairSpec *string `type:"string" enum:"DataKeyPairSpec"` - - // The encrypted copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PrivateKeyCiphertextBlob is automatically base64 encoded/decoded by the SDK. - PrivateKeyCiphertextBlob []byte `min:"1" type:"blob"` - - // The public key (in plaintext). - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GenerateDataKeyPairWithoutPlaintextOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetKeyId(v string) *GenerateDataKeyPairWithoutPlaintextOutput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetKeyPairSpec(v string) *GenerateDataKeyPairWithoutPlaintextOutput { - s.KeyPairSpec = &v - return s -} - -// SetPrivateKeyCiphertextBlob sets the PrivateKeyCiphertextBlob field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetPrivateKeyCiphertextBlob(v []byte) *GenerateDataKeyPairWithoutPlaintextOutput { - s.PrivateKeyCiphertextBlob = v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetPublicKey(v []byte) *GenerateDataKeyPairWithoutPlaintextOutput { - s.PublicKey = v - return s -} - -type GenerateDataKeyWithoutPlaintextInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the data - // key. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // The identifier of the symmetric KMS key that encrypts the data key. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. 
- // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The length of the data key. Use AES_128 to generate a 128-bit symmetric key, - // or AES_256 to generate a 256-bit symmetric key. - KeySpec *string `type:"string" enum:"DataKeySpec"` - - // The length of the data key in bytes. For example, use the value 64 to generate - // a 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit - // and 256-bit symmetric keys), we recommend that you use the KeySpec field - // instead of this one. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyWithoutPlaintextInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyWithoutPlaintextInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyWithoutPlaintextInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetGrantTokens(v []*string) *GenerateDataKeyWithoutPlaintextInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetKeyId(v string) *GenerateDataKeyWithoutPlaintextInput { - s.KeyId = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetKeySpec(v string) *GenerateDataKeyWithoutPlaintextInput { - s.KeySpec = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetNumberOfBytes(v int64) *GenerateDataKeyWithoutPlaintextInput { - s.NumberOfBytes = &v - return s -} - -type GenerateDataKeyWithoutPlaintextOutput struct { - _ struct{} `type:"structure"` - - // The encrypted data key. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. 
- KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *GenerateDataKeyWithoutPlaintextOutput) SetCiphertextBlob(v []byte) *GenerateDataKeyWithoutPlaintextOutput { - s.CiphertextBlob = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyWithoutPlaintextOutput) SetKeyId(v string) *GenerateDataKeyWithoutPlaintextOutput { - s.KeyId = &v - return s -} - -type GenerateRandomInput struct { - _ struct{} `type:"structure"` - - // Generates the random byte string in the CloudHSM cluster that is associated - // with the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - CustomKeyStoreId *string `min:"1" type:"string"` - - // The length of the byte string. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateRandomInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateRandomInput"} - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *GenerateRandomInput) SetCustomKeyStoreId(v string) *GenerateRandomInput { - s.CustomKeyStoreId = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateRandomInput) SetNumberOfBytes(v int64) *GenerateRandomInput { - s.NumberOfBytes = &v - return s -} - -type GenerateRandomOutput struct { - _ struct{} `type:"structure"` - - // The random byte string. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. 
- // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateRandomOutput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - Plaintext []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomOutput) GoString() string { - return s.String() -} - -// SetPlaintext sets the Plaintext field's value. -func (s *GenerateRandomOutput) SetPlaintext(v []byte) *GenerateRandomOutput { - s.Plaintext = v - return s -} - -type GetKeyPolicyInput struct { - _ struct{} `type:"structure"` - - // Gets the key policy for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the name of the key policy. The only valid name is default. To - // get the names of key policies, use ListKeyPolicies. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetKeyPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetKeyPolicyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *GetKeyPolicyInput) SetKeyId(v string) *GetKeyPolicyInput { - s.KeyId = &v - return s -} - -// SetPolicyName sets the PolicyName field's value. 
-func (s *GetKeyPolicyInput) SetPolicyName(v string) *GetKeyPolicyInput { - s.PolicyName = &v - return s -} - -type GetKeyPolicyOutput struct { - _ struct{} `type:"structure"` - - // A key policy document in JSON format. - Policy *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyOutput) GoString() string { - return s.String() -} - -// SetPolicy sets the Policy field's value. -func (s *GetKeyPolicyOutput) SetPolicy(v string) *GetKeyPolicyOutput { - s.Policy = &v - return s -} - -type GetKeyRotationStatusInput struct { - _ struct{} `type:"structure"` - - // Gets the rotation status for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetKeyRotationStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetKeyRotationStatusInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *GetKeyRotationStatusInput) SetKeyId(v string) *GetKeyRotationStatusInput { - s.KeyId = &v - return s -} - -type GetKeyRotationStatusOutput struct { - _ struct{} `type:"structure"` - - // A Boolean value that specifies whether key rotation is enabled. - KeyRotationEnabled *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetKeyRotationStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusOutput) GoString() string { - return s.String() -} - -// SetKeyRotationEnabled sets the KeyRotationEnabled field's value. -func (s *GetKeyRotationStatusOutput) SetKeyRotationEnabled(v bool) *GetKeyRotationStatusOutput { - s.KeyRotationEnabled = &v - return s -} - -type GetParametersForImportInput struct { - _ struct{} `type:"structure"` - - // The identifier of the symmetric KMS key into which you will import key material. - // The Origin of the KMS key must be EXTERNAL. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The algorithm you will use to encrypt the key material before importing it - // with ImportKeyMaterial. For more information, see Encrypt the Key Material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) - // in the Key Management Service Developer Guide. - // - // WrappingAlgorithm is a required field - WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"` - - // The type of wrapping key (public key) to return in the response. Only 2048-bit - // RSA public keys are supported. - // - // WrappingKeySpec is a required field - WrappingKeySpec *string `type:"string" required:"true" enum:"WrappingKeySpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetParametersForImportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetParametersForImportInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.WrappingAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("WrappingAlgorithm")) - } - if s.WrappingKeySpec == nil { - invalidParams.Add(request.NewErrParamRequired("WrappingKeySpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. 
-func (s *GetParametersForImportInput) SetKeyId(v string) *GetParametersForImportInput { - s.KeyId = &v - return s -} - -// SetWrappingAlgorithm sets the WrappingAlgorithm field's value. -func (s *GetParametersForImportInput) SetWrappingAlgorithm(v string) *GetParametersForImportInput { - s.WrappingAlgorithm = &v - return s -} - -// SetWrappingKeySpec sets the WrappingKeySpec field's value. -func (s *GetParametersForImportInput) SetWrappingKeySpec(v string) *GetParametersForImportInput { - s.WrappingKeySpec = &v - return s -} - -type GetParametersForImportOutput struct { - _ struct{} `type:"structure"` - - // The import token to send in a subsequent ImportKeyMaterial request. - // ImportToken is automatically base64 encoded/decoded by the SDK. - ImportToken []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key to use in a subsequent ImportKeyMaterial request. This is - // the same KMS key specified in the GetParametersForImport request. - KeyId *string `min:"1" type:"string"` - - // The time at which the import token and public key are no longer valid. After - // this time, you cannot use them to make an ImportKeyMaterial request and you - // must send another GetParametersForImport request to get new ones. - ParametersValidTo *time.Time `type:"timestamp"` - - // The public key to use to encrypt the key material before importing it with - // ImportKeyMaterial. - // - // PublicKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetParametersForImportOutput's - // String and GoString methods. - // - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportOutput) GoString() string { - return s.String() -} - -// SetImportToken sets the ImportToken field's value. -func (s *GetParametersForImportOutput) SetImportToken(v []byte) *GetParametersForImportOutput { - s.ImportToken = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GetParametersForImportOutput) SetKeyId(v string) *GetParametersForImportOutput { - s.KeyId = &v - return s -} - -// SetParametersValidTo sets the ParametersValidTo field's value. -func (s *GetParametersForImportOutput) SetParametersValidTo(v time.Time) *GetParametersForImportOutput { - s.ParametersValidTo = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GetParametersForImportOutput) SetPublicKey(v []byte) *GetParametersForImportOutput { - s.PublicKey = v - return s -} - -type GetPublicKeyInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the asymmetric KMS key that includes the public key. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPublicKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GetPublicKeyInput) SetGrantTokens(v []*string) *GetPublicKeyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GetPublicKeyInput) SetKeyId(v string) *GetPublicKeyInput { - s.KeyId = &v - return s -} - -type GetPublicKeyOutput struct { - _ struct{} `type:"structure"` - - // Instead, use the KeySpec field in the GetPublicKey response. - // - // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend - // that you use the KeySpec field in your code. However, to avoid breaking changes, - // KMS will support both fields. - // - // Deprecated: This field has been deprecated. Instead, use the KeySpec field. - CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"` - - // The encryption algorithms that KMS supports for this key. - // - // This information is critical. If a public key encrypts data outside of KMS - // by using an unsupported encryption algorithm, the ciphertext cannot be decrypted. - // - // This field appears in the response only when the KeyUsage of the public key - // is ENCRYPT_DECRYPT. 
- EncryptionAlgorithms []*string `type:"list"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key from which the public key was downloaded. - KeyId *string `min:"1" type:"string"` - - // The type of the of the public key that was downloaded. - KeySpec *string `type:"string" enum:"KeySpec"` - - // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or - // SIGN_VERIFY. - // - // This information is critical. If a public key with SIGN_VERIFY key usage - // encrypts data outside of KMS, the ciphertext cannot be decrypted. - KeyUsage *string `type:"string" enum:"KeyUsageType"` - - // The exported public key. - // - // The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo - // (SPKI), as defined in RFC 5280 (https://tools.ietf.org/html/rfc5280). When - // you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. - // Otherwise, it is not Base64-encoded. - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob"` - - // The signing algorithms that KMS supports for this key. - // - // This field appears in the response only when the KeyUsage of the public key - // is SIGN_VERIFY. - SigningAlgorithms []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPublicKeyOutput) GoString() string { - return s.String() -} - -// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value. -func (s *GetPublicKeyOutput) SetCustomerMasterKeySpec(v string) *GetPublicKeyOutput { - s.CustomerMasterKeySpec = &v - return s -} - -// SetEncryptionAlgorithms sets the EncryptionAlgorithms field's value. -func (s *GetPublicKeyOutput) SetEncryptionAlgorithms(v []*string) *GetPublicKeyOutput { - s.EncryptionAlgorithms = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GetPublicKeyOutput) SetKeyId(v string) *GetPublicKeyOutput { - s.KeyId = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *GetPublicKeyOutput) SetKeySpec(v string) *GetPublicKeyOutput { - s.KeySpec = &v - return s -} - -// SetKeyUsage sets the KeyUsage field's value. -func (s *GetPublicKeyOutput) SetKeyUsage(v string) *GetPublicKeyOutput { - s.KeyUsage = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GetPublicKeyOutput) SetPublicKey(v []byte) *GetPublicKeyOutput { - s.PublicKey = v - return s -} - -// SetSigningAlgorithms sets the SigningAlgorithms field's value. 
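A companion sketch for GetPublicKey: the returned blob is DER-encoded SubjectPublicKeyInfo, so it parses with the standard library. The alias is a placeholder and session setup is assumed as above.

```
package main

import (
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GetPublicKey(&kms.GetPublicKeyInput{
		KeyId: aws.String("alias/ExampleAlias"), // placeholder alias
	})
	if err != nil {
		log.Fatal(err)
	}

	// The SDK has already base64-decoded PublicKey, so the DER bytes go
	// straight into crypto/x509.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("key %s: usage=%s spec=%s go type=%T\n",
		aws.StringValue(out.KeyId), aws.StringValue(out.KeyUsage),
		aws.StringValue(out.KeySpec), pub)
}
```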
-func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutput { - s.SigningAlgorithms = v - return s -} - -// Use this structure to allow cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) -// in the grant only when the operation request includes the specified encryption -// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). -// -// KMS applies the grant constraints only to cryptographic operations that support -// an encryption context, that is, all cryptographic operations with a symmetric -// KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). -// Grant constraints are not applied to operations that do not support an encryption -// context, such as cryptographic operations with asymmetric KMS keys and management -// operations, such as DescribeKey or RetireGrant. -// -// In a cryptographic operation, the encryption context in the decryption operation -// must be an exact, case-sensitive match for the keys and values in the encryption -// context of the encryption operation. Only the order of the pairs can vary. -// -// However, in a grant constraint, the key in each key-value pair is not case -// sensitive, but the value is case sensitive. -// -// To avoid confusion, do not use multiple encryption context pairs that differ -// only by case. To require a fully case-sensitive encryption context, use the -// kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM -// or key policy. For details, see kms:EncryptionContext: (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) -// in the Key Management Service Developer Guide . -type GrantConstraints struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs that must match the encryption context in the cryptographic - // operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // request. The grant allows the operation only when the encryption context - // in the request is the same as the encryption context specified in this constraint. - EncryptionContextEquals map[string]*string `type:"map"` - - // A list of key-value pairs that must be included in the encryption context - // of the cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // request. The grant allows the cryptographic operation only when the encryption - // context in the request includes the key-value pairs specified in this constraint, - // although it can include additional key-value pairs. - EncryptionContextSubset map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantConstraints) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantConstraints) GoString() string { - return s.String() -} - -// SetEncryptionContextEquals sets the EncryptionContextEquals field's value. 
-func (s *GrantConstraints) SetEncryptionContextEquals(v map[string]*string) *GrantConstraints { - s.EncryptionContextEquals = v - return s -} - -// SetEncryptionContextSubset sets the EncryptionContextSubset field's value. -func (s *GrantConstraints) SetEncryptionContextSubset(v map[string]*string) *GrantConstraints { - s.EncryptionContextSubset = v - return s -} - -// Contains information about a grant. -type GrantListEntry struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs that must be present in the encryption context - // of certain subsequent operations that the grant allows. - Constraints *GrantConstraints `type:"structure"` - - // The date and time when the grant was created. - CreationDate *time.Time `type:"timestamp"` - - // The unique identifier for the grant. - GrantId *string `min:"1" type:"string"` - - // The identity that gets the permissions in the grant. - // - // The GranteePrincipal field in the ListGrants response usually contains the - // user or role designated as the grantee principal in the grant. However, when - // the grantee principal in the grant is an Amazon Web Services service, the - // GranteePrincipal field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), - // which might represent several different grantee principals. - GranteePrincipal *string `min:"1" type:"string"` - - // The Amazon Web Services account under which the grant was issued. - IssuingAccount *string `min:"1" type:"string"` - - // The unique identifier for the KMS key to which the grant applies. - KeyId *string `min:"1" type:"string"` - - // The friendly name that identifies the grant. If a name was provided in the - // CreateGrant request, that name is returned. Otherwise this value is null. - Name *string `min:"1" type:"string"` - - // The list of operations permitted by the grant. - Operations []*string `type:"list"` - - // The principal that can retire the grant. - RetiringPrincipal *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantListEntry) GoString() string { - return s.String() -} - -// SetConstraints sets the Constraints field's value. -func (s *GrantListEntry) SetConstraints(v *GrantConstraints) *GrantListEntry { - s.Constraints = v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *GrantListEntry) SetCreationDate(v time.Time) *GrantListEntry { - s.CreationDate = &v - return s -} - -// SetGrantId sets the GrantId field's value. -func (s *GrantListEntry) SetGrantId(v string) *GrantListEntry { - s.GrantId = &v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. -func (s *GrantListEntry) SetGranteePrincipal(v string) *GrantListEntry { - s.GranteePrincipal = &v - return s -} - -// SetIssuingAccount sets the IssuingAccount field's value. 
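The constraint semantics above (case-insensitive keys, case-sensitive values, subset versus exact match) are easiest to see on a concrete grant. A sketch follows; the CreateGrant call and the principal ARN are illustrative, with CreateGrantInput defined elsewhere in this same removed file.

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// EncryptionContextSubset: the request may carry extra pairs, but must
	// include department=it. EncryptionContextEquals would demand an exact match.
	constraints := (&kms.GrantConstraints{}).SetEncryptionContextSubset(map[string]*string{
		"department": aws.String("it"),
	})

	out, err := svc.CreateGrant(&kms.CreateGrantInput{
		KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),       // placeholder
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"), // placeholder
		Operations:       []*string{aws.String("Decrypt")},
		Constraints:      constraints,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("grant id:", aws.StringValue(out.GrantId))
}
```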
-func (s *GrantListEntry) SetIssuingAccount(v string) *GrantListEntry { - s.IssuingAccount = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GrantListEntry) SetKeyId(v string) *GrantListEntry { - s.KeyId = &v - return s -} - -// SetName sets the Name field's value. -func (s *GrantListEntry) SetName(v string) *GrantListEntry { - s.Name = &v - return s -} - -// SetOperations sets the Operations field's value. -func (s *GrantListEntry) SetOperations(v []*string) *GrantListEntry { - s.Operations = v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *GrantListEntry) SetRetiringPrincipal(v string) *GrantListEntry { - s.RetiringPrincipal = &v - return s -} - -type ImportKeyMaterialInput struct { - _ struct{} `type:"structure"` - - // The encrypted key material to import. The key material must be encrypted - // with the public wrapping key that GetParametersForImport returned, using - // the wrapping algorithm that you specified in the same GetParametersForImport - // request. - // EncryptedKeyMaterial is automatically base64 encoded/decoded by the SDK. - // - // EncryptedKeyMaterial is a required field - EncryptedKeyMaterial []byte `min:"1" type:"blob" required:"true"` - - // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES, - // in which case you must include the ValidTo parameter. When this parameter - // is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter. - ExpirationModel *string `type:"string" enum:"ExpirationModelType"` - - // The import token that you received in the response to a previous GetParametersForImport - // request. It must be from the same response that contained the public key - // that you used to encrypt the key material. - // ImportToken is automatically base64 encoded/decoded by the SDK. - // - // ImportToken is a required field - ImportToken []byte `min:"1" type:"blob" required:"true"` - - // The identifier of the symmetric KMS key that receives the imported key material. - // The KMS key's Origin must be EXTERNAL. This must be the same KMS key specified - // in the KeyID parameter of the corresponding GetParametersForImport request. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The time at which the imported key material expires. When the key material - // expires, KMS deletes the key material and the KMS key becomes unusable. You - // must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE. - // Otherwise it is required. - ValidTo *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportKeyMaterialInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ImportKeyMaterialInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ImportKeyMaterialInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportKeyMaterialInput"} - if s.EncryptedKeyMaterial == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptedKeyMaterial")) - } - if s.EncryptedKeyMaterial != nil && len(s.EncryptedKeyMaterial) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncryptedKeyMaterial", 1)) - } - if s.ImportToken == nil { - invalidParams.Add(request.NewErrParamRequired("ImportToken")) - } - if s.ImportToken != nil && len(s.ImportToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ImportToken", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptedKeyMaterial sets the EncryptedKeyMaterial field's value. -func (s *ImportKeyMaterialInput) SetEncryptedKeyMaterial(v []byte) *ImportKeyMaterialInput { - s.EncryptedKeyMaterial = v - return s -} - -// SetExpirationModel sets the ExpirationModel field's value. -func (s *ImportKeyMaterialInput) SetExpirationModel(v string) *ImportKeyMaterialInput { - s.ExpirationModel = &v - return s -} - -// SetImportToken sets the ImportToken field's value. -func (s *ImportKeyMaterialInput) SetImportToken(v []byte) *ImportKeyMaterialInput { - s.ImportToken = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ImportKeyMaterialInput) SetKeyId(v string) *ImportKeyMaterialInput { - s.KeyId = &v - return s -} - -// SetValidTo sets the ValidTo field's value. -func (s *ImportKeyMaterialInput) SetValidTo(v time.Time) *ImportKeyMaterialInput { - s.ValidTo = &v - return s -} - -type ImportKeyMaterialOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportKeyMaterialOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportKeyMaterialOutput) GoString() string { - return s.String() -} - -// The request was rejected because the specified KMS key cannot decrypt the -// data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request -// must identify the same KMS key that was used to encrypt the ciphertext. -type IncorrectKeyException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectKeyException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
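Tying the two halves of the import flow together: a sketch that wraps caller-supplied key material with the RSA-2048 wrapping key from GetParametersForImport (using RSAES_OAEP_SHA_256, matching the earlier request) and imports it. `params` is assumed to be the output of the earlier GetParametersForImport sketch.

```
package example

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// importMaterial wraps keyMaterial (e.g. a 32-byte AES key) with the public
// wrapping key returned by GetParametersForImport and imports it into keyID.
func importMaterial(svc *kms.KMS, keyID string, params *kms.GetParametersForImportOutput, keyMaterial []byte) error {
	pub, err := x509.ParsePKIXPublicKey(params.PublicKey)
	if err != nil {
		return err
	}
	rsaPub, ok := pub.(*rsa.PublicKey)
	if !ok {
		return fmt.Errorf("expected an RSA wrapping key, got %T", pub)
	}

	// Use the same wrapping algorithm that was requested (RSAES_OAEP_SHA_256).
	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, keyMaterial, nil)
	if err != nil {
		return err
	}

	_, err = svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		ImportToken:          params.ImportToken,
		EncryptedKeyMaterial: wrapped,
		ExpirationModel:      aws.String("KEY_MATERIAL_DOES_NOT_EXPIRE"),
	})
	return err
}
```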
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectKeyException) GoString() string { - return s.String() -} - -func newErrorIncorrectKeyException(v protocol.ResponseMetadata) error { - return &IncorrectKeyException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IncorrectKeyException) Code() string { - return "IncorrectKeyException" -} - -// Message returns the exception's message. -func (s *IncorrectKeyException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IncorrectKeyException) OrigErr() error { - return nil -} - -func (s *IncorrectKeyException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IncorrectKeyException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IncorrectKeyException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the key material in the request is, expired, -// invalid, or is not the same key material that was previously imported into -// this KMS key. -type IncorrectKeyMaterialException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectKeyMaterialException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectKeyMaterialException) GoString() string { - return s.String() -} - -func newErrorIncorrectKeyMaterialException(v protocol.ResponseMetadata) error { - return &IncorrectKeyMaterialException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IncorrectKeyMaterialException) Code() string { - return "IncorrectKeyMaterialException" -} - -// Message returns the exception's message. -func (s *IncorrectKeyMaterialException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IncorrectKeyMaterialException) OrigErr() error { - return nil -} - -func (s *IncorrectKeyMaterialException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IncorrectKeyMaterialException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. 
-func (s *IncorrectKeyMaterialException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the trust anchor certificate in the request -// is not the trust anchor certificate for the specified CloudHSM cluster. -// -// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), -// you create the trust anchor certificate and save it in the customerCA.crt -// file. -type IncorrectTrustAnchorException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectTrustAnchorException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectTrustAnchorException) GoString() string { - return s.String() -} - -func newErrorIncorrectTrustAnchorException(v protocol.ResponseMetadata) error { - return &IncorrectTrustAnchorException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IncorrectTrustAnchorException) Code() string { - return "IncorrectTrustAnchorException" -} - -// Message returns the exception's message. -func (s *IncorrectTrustAnchorException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IncorrectTrustAnchorException) OrigErr() error { - return nil -} - -func (s *IncorrectTrustAnchorException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IncorrectTrustAnchorException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IncorrectTrustAnchorException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because an internal exception occurred. The request -// can be retried. -type InternalException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s InternalException) GoString() string { - return s.String() -} - -func newErrorInternalException(v protocol.ResponseMetadata) error { - return &InternalException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InternalException) Code() string { - return "KMSInternalException" -} - -// Message returns the exception's message. -func (s *InternalException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalException) OrigErr() error { - return nil -} - -func (s *InternalException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InternalException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InternalException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified alias name is not valid. -type InvalidAliasNameException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidAliasNameException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidAliasNameException) GoString() string { - return s.String() -} - -func newErrorInvalidAliasNameException(v protocol.ResponseMetadata) error { - return &InvalidAliasNameException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidAliasNameException) Code() string { - return "InvalidAliasNameException" -} - -// Message returns the exception's message. -func (s *InvalidAliasNameException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidAliasNameException) OrigErr() error { - return nil -} - -func (s *InvalidAliasNameException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidAliasNameException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidAliasNameException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -type InvalidArnException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidArnException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidArnException) GoString() string { - return s.String() -} - -func newErrorInvalidArnException(v protocol.ResponseMetadata) error { - return &InvalidArnException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidArnException) Code() string { - return "InvalidArnException" -} - -// Message returns the exception's message. -func (s *InvalidArnException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidArnException) OrigErr() error { - return nil -} - -func (s *InvalidArnException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidArnException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidArnException) RequestID() string { - return s.RespMetadata.RequestID -} - -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -type InvalidCiphertextException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidCiphertextException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidCiphertextException) GoString() string { - return s.String() -} - -func newErrorInvalidCiphertextException(v protocol.ResponseMetadata) error { - return &InvalidCiphertextException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidCiphertextException) Code() string { - return "InvalidCiphertextException" -} - -// Message returns the exception's message. -func (s *InvalidCiphertextException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidCiphertextException) OrigErr() error { - return nil -} - -func (s *InvalidCiphertextException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. 
-func (s *InvalidCiphertextException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidCiphertextException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified GrantId is not valid. -type InvalidGrantIdException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantIdException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantIdException) GoString() string { - return s.String() -} - -func newErrorInvalidGrantIdException(v protocol.ResponseMetadata) error { - return &InvalidGrantIdException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidGrantIdException) Code() string { - return "InvalidGrantIdException" -} - -// Message returns the exception's message. -func (s *InvalidGrantIdException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidGrantIdException) OrigErr() error { - return nil -} - -func (s *InvalidGrantIdException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidGrantIdException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidGrantIdException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified grant token is not valid. -type InvalidGrantTokenException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantTokenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantTokenException) GoString() string { - return s.String() -} - -func newErrorInvalidGrantTokenException(v protocol.ResponseMetadata) error { - return &InvalidGrantTokenException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidGrantTokenException) Code() string { - return "InvalidGrantTokenException" -} - -// Message returns the exception's message. 
-func (s *InvalidGrantTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidGrantTokenException) OrigErr() error { - return nil -} - -func (s *InvalidGrantTokenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidGrantTokenException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidGrantTokenException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the provided import token is invalid or -// is associated with a different KMS key. -type InvalidImportTokenException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidImportTokenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidImportTokenException) GoString() string { - return s.String() -} - -func newErrorInvalidImportTokenException(v protocol.ResponseMetadata) error { - return &InvalidImportTokenException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidImportTokenException) Code() string { - return "InvalidImportTokenException" -} - -// Message returns the exception's message. -func (s *InvalidImportTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidImportTokenException) OrigErr() error { - return nil -} - -func (s *InvalidImportTokenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidImportTokenException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidImportTokenException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage -// must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey -// operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. 
-type InvalidKeyUsageException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidKeyUsageException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidKeyUsageException) GoString() string { - return s.String() -} - -func newErrorInvalidKeyUsageException(v protocol.ResponseMetadata) error { - return &InvalidKeyUsageException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidKeyUsageException) Code() string { - return "InvalidKeyUsageException" -} - -// Message returns the exception's message. -func (s *InvalidKeyUsageException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidKeyUsageException) OrigErr() error { - return nil -} - -func (s *InvalidKeyUsageException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidKeyUsageException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidKeyUsageException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -type InvalidMarkerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidMarkerException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidMarkerException) GoString() string { - return s.String() -} - -func newErrorInvalidMarkerException(v protocol.ResponseMetadata) error { - return &InvalidMarkerException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidMarkerException) Code() string { - return "InvalidMarkerException" -} - -// Message returns the exception's message. -func (s *InvalidMarkerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *InvalidMarkerException) OrigErr() error { - return nil -} - -func (s *InvalidMarkerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidMarkerException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidMarkerException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -type InvalidStateException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidStateException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidStateException) GoString() string { - return s.String() -} - -func newErrorInvalidStateException(v protocol.ResponseMetadata) error { - return &InvalidStateException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidStateException) Code() string { - return "KMSInvalidStateException" -} - -// Message returns the exception's message. -func (s *InvalidStateException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidStateException) OrigErr() error { - return nil -} - -func (s *InvalidStateException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidStateException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidStateException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the signature verification failed. Signature -// verification fails when it cannot confirm that signature was produced by -// signing the specified message with the specified KMS key and signing algorithm. -type KMSInvalidSignatureException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
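These modeled exception types carry the Code, Message, StatusCode, and RequestID accessors shown above, so callers can branch on them. A sketch, assuming the client surfaces the concrete types to errors.As as this generated code intends; the ciphertext is deliberately bogus.

```
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: []byte("not real ciphertext")})
	if err == nil {
		return
	}

	var badCiphertext *kms.InvalidCiphertextException
	var badState *kms.InvalidStateException
	switch {
	case errors.As(err, &badCiphertext):
		// Corrupted ciphertext or mismatched encryption context; not retryable.
		log.Fatalf("%s: %s", badCiphertext.Code(), badCiphertext.Message())
	case errors.As(err, &badState):
		// The key exists but its state (for example PendingDeletion) forbids the call.
		log.Fatalf("%s (HTTP %d)", badState.Code(), badState.StatusCode())
	default:
		log.Fatal(err)
	}
}
```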
-func (s KMSInvalidSignatureException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KMSInvalidSignatureException) GoString() string { - return s.String() -} - -func newErrorKMSInvalidSignatureException(v protocol.ResponseMetadata) error { - return &KMSInvalidSignatureException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *KMSInvalidSignatureException) Code() string { - return "KMSInvalidSignatureException" -} - -// Message returns the exception's message. -func (s *KMSInvalidSignatureException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *KMSInvalidSignatureException) OrigErr() error { - return nil -} - -func (s *KMSInvalidSignatureException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *KMSInvalidSignatureException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *KMSInvalidSignatureException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains information about each entry in the key list. -type KeyListEntry struct { - _ struct{} `type:"structure"` - - // ARN of the key. - KeyArn *string `min:"20" type:"string"` - - // Unique identifier of the key. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyListEntry) GoString() string { - return s.String() -} - -// SetKeyArn sets the KeyArn field's value. -func (s *KeyListEntry) SetKeyArn(v string) *KeyListEntry { - s.KeyArn = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *KeyListEntry) SetKeyId(v string) *KeyListEntry { - s.KeyId = &v - return s -} - -// Contains metadata about a KMS key. -// -// This data type is used as a response element for the CreateKey and DescribeKey -// operations. -type KeyMetadata struct { - _ struct{} `type:"structure"` - - // The twelve-digit account ID of the Amazon Web Services account that owns - // the KMS key. - AWSAccountId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the KMS key. For examples, see Key Management - // Service (KMS) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) - // in the Example ARNs section of the Amazon Web Services General Reference. - Arn *string `min:"20" type:"string"` - - // The cluster ID of the CloudHSM cluster that contains the key material for - // the KMS key. 
When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), - // KMS creates the key material for the KMS key in the associated CloudHSM cluster. - // This value is present only when the KMS key is created in a custom key store. - CloudHsmClusterId *string `min:"19" type:"string"` - - // The date and time when the KMS key was created. - CreationDate *time.Time `type:"timestamp"` - - // A unique identifier for the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) - // that contains the KMS key. This value is present only when the KMS key is - // created in a custom key store. - CustomKeyStoreId *string `min:"1" type:"string"` - - // Instead, use the KeySpec field. - // - // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend - // that you use the KeySpec field in your code. However, to avoid breaking changes, - // KMS will support both fields. - // - // Deprecated: This field has been deprecated. Instead, use the KeySpec field. - CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"` - - // The date and time after which KMS deletes this KMS key. This value is present - // only when the KMS key is scheduled for deletion, that is, when its KeyState - // is PendingDeletion. - // - // When the primary key in a multi-Region key is scheduled for deletion but - // still has replica keys, its key state is PendingReplicaDeletion and the length - // of its waiting period is displayed in the PendingDeletionWindowInDays field. - DeletionDate *time.Time `type:"timestamp"` - - // The description of the KMS key. - Description *string `type:"string"` - - // Specifies whether the KMS key is enabled. When KeyState is Enabled this value - // is true, otherwise it is false. - Enabled *bool `type:"boolean"` - - // The encryption algorithms that the KMS key supports. You cannot use the KMS - // key with other encryption algorithms within KMS. - // - // This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT. - EncryptionAlgorithms []*string `type:"list"` - - // Specifies whether the KMS key's key material expires. This value is present - // only when Origin is EXTERNAL, otherwise this value is omitted. - ExpirationModel *string `type:"string" enum:"ExpirationModelType"` - - // The globally unique identifier for the KMS key. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The manager of the KMS key. KMS keys in your Amazon Web Services account - // are either customer managed or Amazon Web Services managed. For more information - // about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) - // in the Key Management Service Developer Guide. - KeyManager *string `type:"string" enum:"KeyManagerType"` - - // Describes the type of key material in the KMS key. - KeySpec *string `type:"string" enum:"KeySpec"` - - // The current status of the KMS key. - // - // For more information about how key state affects the use of a KMS key, see - // Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. 
- KeyState *string `type:"string" enum:"KeyState"` - - // The cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // for which you can use the KMS key. - KeyUsage *string `type:"string" enum:"KeyUsageType"` - - // Indicates whether the KMS key is a multi-Region (True) or regional (False) - // key. This value is True for multi-Region primary and replica keys and False - // for regional KMS keys. - // - // For more information about multi-Region keys, see Using multi-Region keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) - // in the Key Management Service Developer Guide. - MultiRegion *bool `type:"boolean"` - - // Lists the primary and replica keys in same multi-Region key. This field is - // present only when the value of the MultiRegion field is True. - // - // For more information about any listed KMS key, use the DescribeKey operation. - // - // * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA - // key. - // - // * PrimaryKey displays the key ARN and Region of the primary key. This - // field displays the current KMS key if it is the primary key. - // - // * ReplicaKeys displays the key ARNs and Regions of all replica keys. This - // field includes the current KMS key if it is a replica key. - MultiRegionConfiguration *MultiRegionConfiguration `type:"structure"` - - // The source of the key material for the KMS key. When this value is AWS_KMS, - // KMS created the key material. When this value is EXTERNAL, the key material - // was imported or the KMS key doesn't have any key material. When this value - // is AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated - // with a custom key store. - Origin *string `type:"string" enum:"OriginType"` - - // The waiting period before the primary key in a multi-Region key is deleted. - // This waiting period begins when the last of its replica keys is deleted. - // This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. - // That indicates that the KMS key is the primary key in a multi-Region key, - // it is scheduled for deletion, and it still has existing replica keys. - // - // When a single-Region KMS key or a multi-Region replica key is scheduled for - // deletion, its deletion date is displayed in the DeletionDate field. However, - // when the primary key in a multi-Region key is scheduled for deletion, its - // waiting period doesn't begin until all of its replica keys are deleted. This - // value displays that waiting period. When the last replica key in the multi-Region - // key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion - // to PendingDeletion and the deletion date appears in the DeletionDate field. - PendingDeletionWindowInDays *int64 `min:"1" type:"integer"` - - // The signing algorithms that the KMS key supports. You cannot use the KMS - // key with other signing algorithms within KMS. - // - // This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY. - SigningAlgorithms []*string `type:"list"` - - // The time at which the imported key material expires. When the key material - // expires, KMS deletes the key material and the KMS key becomes unusable. This - // value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel - // is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. 
- ValidTo *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyMetadata) GoString() string { - return s.String() -} - -// SetAWSAccountId sets the AWSAccountId field's value. -func (s *KeyMetadata) SetAWSAccountId(v string) *KeyMetadata { - s.AWSAccountId = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *KeyMetadata) SetArn(v string) *KeyMetadata { - s.Arn = &v - return s -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *KeyMetadata) SetCloudHsmClusterId(v string) *KeyMetadata { - s.CloudHsmClusterId = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *KeyMetadata) SetCreationDate(v time.Time) *KeyMetadata { - s.CreationDate = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *KeyMetadata) SetCustomKeyStoreId(v string) *KeyMetadata { - s.CustomKeyStoreId = &v - return s -} - -// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value. -func (s *KeyMetadata) SetCustomerMasterKeySpec(v string) *KeyMetadata { - s.CustomerMasterKeySpec = &v - return s -} - -// SetDeletionDate sets the DeletionDate field's value. -func (s *KeyMetadata) SetDeletionDate(v time.Time) *KeyMetadata { - s.DeletionDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *KeyMetadata) SetDescription(v string) *KeyMetadata { - s.Description = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *KeyMetadata) SetEnabled(v bool) *KeyMetadata { - s.Enabled = &v - return s -} - -// SetEncryptionAlgorithms sets the EncryptionAlgorithms field's value. -func (s *KeyMetadata) SetEncryptionAlgorithms(v []*string) *KeyMetadata { - s.EncryptionAlgorithms = v - return s -} - -// SetExpirationModel sets the ExpirationModel field's value. -func (s *KeyMetadata) SetExpirationModel(v string) *KeyMetadata { - s.ExpirationModel = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *KeyMetadata) SetKeyId(v string) *KeyMetadata { - s.KeyId = &v - return s -} - -// SetKeyManager sets the KeyManager field's value. -func (s *KeyMetadata) SetKeyManager(v string) *KeyMetadata { - s.KeyManager = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *KeyMetadata) SetKeySpec(v string) *KeyMetadata { - s.KeySpec = &v - return s -} - -// SetKeyState sets the KeyState field's value. -func (s *KeyMetadata) SetKeyState(v string) *KeyMetadata { - s.KeyState = &v - return s -} - -// SetKeyUsage sets the KeyUsage field's value. -func (s *KeyMetadata) SetKeyUsage(v string) *KeyMetadata { - s.KeyUsage = &v - return s -} - -// SetMultiRegion sets the MultiRegion field's value. -func (s *KeyMetadata) SetMultiRegion(v bool) *KeyMetadata { - s.MultiRegion = &v - return s -} - -// SetMultiRegionConfiguration sets the MultiRegionConfiguration field's value. 
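For orientation while reviewing the removed `KeyMetadata` fields above, here is a minimal sketch of how a caller would read them through the old vendored v1 client. The `session.NewSession`/`kms.New` plumbing, the `DescribeKey` call, and the key ID are assumptions taken from the wider aws-sdk-go v1 package this patch deletes, not from this hunk.

```
// Illustrative sketch only: inspecting KeyMetadata with the removed
// aws-sdk-go v1 KMS client. The key ID below is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// KeyState, KeySpec and Enabled are the KeyMetadata fields documented above.
	md := out.KeyMetadata
	fmt.Printf("key %s: state=%s spec=%s enabled=%t\n",
		aws.StringValue(md.KeyId),
		aws.StringValue(md.KeyState),
		aws.StringValue(md.KeySpec),
		aws.BoolValue(md.Enabled))
}
```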
-func (s *KeyMetadata) SetMultiRegionConfiguration(v *MultiRegionConfiguration) *KeyMetadata { - s.MultiRegionConfiguration = v - return s -} - -// SetOrigin sets the Origin field's value. -func (s *KeyMetadata) SetOrigin(v string) *KeyMetadata { - s.Origin = &v - return s -} - -// SetPendingDeletionWindowInDays sets the PendingDeletionWindowInDays field's value. -func (s *KeyMetadata) SetPendingDeletionWindowInDays(v int64) *KeyMetadata { - s.PendingDeletionWindowInDays = &v - return s -} - -// SetSigningAlgorithms sets the SigningAlgorithms field's value. -func (s *KeyMetadata) SetSigningAlgorithms(v []*string) *KeyMetadata { - s.SigningAlgorithms = v - return s -} - -// SetValidTo sets the ValidTo field's value. -func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata { - s.ValidTo = &v - return s -} - -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -type KeyUnavailableException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyUnavailableException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyUnavailableException) GoString() string { - return s.String() -} - -func newErrorKeyUnavailableException(v protocol.ResponseMetadata) error { - return &KeyUnavailableException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *KeyUnavailableException) Code() string { - return "KeyUnavailableException" -} - -// Message returns the exception's message. -func (s *KeyUnavailableException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *KeyUnavailableException) OrigErr() error { - return nil -} - -func (s *KeyUnavailableException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *KeyUnavailableException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *KeyUnavailableException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -type LimitExceededException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s LimitExceededException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LimitExceededException) GoString() string { - return s.String() -} - -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *LimitExceededException) Code() string { - return "LimitExceededException" -} - -// Message returns the exception's message. -func (s *LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *LimitExceededException) OrigErr() error { - return nil -} - -func (s *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *LimitExceededException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *LimitExceededException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ListAliasesInput struct { - _ struct{} `type:"structure"` - - // Lists only aliases that are associated with the specified KMS key. Enter - // a KMS key in your Amazon Web Services account. - // - // This parameter is optional. If you omit it, ListAliases returns all aliases - // in the account and Region. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - KeyId *string `min:"1" type:"string"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
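The modeled exception types being removed here (such as KeyUnavailableException and LimitExceededException) were matched by callers through their error codes. Below is a minimal sketch of that pattern, assuming the `awserr` package and the `kms.ErrCode*` constants from the wider v1 SDK that this patch drops from the vendor tree; the key ID is a placeholder.

```
// Illustrative sketch only: dispatching on the v1 SDK's modeled KMS errors.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kms.ErrCodeKeyUnavailableException:
			// Per the type's documentation, the request can be retried.
			log.Println("key temporarily unavailable, retryable:", aerr.Message())
		case kms.ErrCodeLimitExceededException:
			log.Println("request quota exceeded:", aerr.Message())
		default:
			log.Println("other KMS error:", aerr.Code(), aerr.Message())
		}
	}
}
```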
-func (s *ListAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"} - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListAliasesInput) SetKeyId(v string) *ListAliasesInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListAliasesInput) SetLimit(v int64) *ListAliasesInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListAliasesInput) SetMarker(v string) *ListAliasesInput { - s.Marker = &v - return s -} - -type ListAliasesOutput struct { - _ struct{} `type:"structure"` - - // A list of aliases. - Aliases []*AliasListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesOutput) GoString() string { - return s.String() -} - -// SetAliases sets the Aliases field's value. -func (s *ListAliasesOutput) SetAliases(v []*AliasListEntry) *ListAliasesOutput { - s.Aliases = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListAliasesOutput) SetNextMarker(v string) *ListAliasesOutput { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListAliasesOutput) SetTruncated(v bool) *ListAliasesOutput { - s.Truncated = &v - return s -} - -type ListGrantsInput struct { - _ struct{} `type:"structure"` - - // Returns only the grant with the specified grant ID. The grant ID uniquely - // identifies the grant. - GrantId *string `min:"1" type:"string"` - - // Returns only grants where the specified principal is the grantee principal - // for the grant. - GranteePrincipal *string `min:"1" type:"string"` - - // Returns only grants for the specified KMS key. This parameter is required. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListGrantsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListGrantsInput"} - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.GranteePrincipal != nil && len(*s.GranteePrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GranteePrincipal", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *ListGrantsInput) SetGrantId(v string) *ListGrantsInput { - s.GrantId = &v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. -func (s *ListGrantsInput) SetGranteePrincipal(v string) *ListGrantsInput { - s.GranteePrincipal = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ListGrantsInput) SetKeyId(v string) *ListGrantsInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListGrantsInput) SetLimit(v int64) *ListGrantsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListGrantsInput) SetMarker(v string) *ListGrantsInput { - s.Marker = &v - return s -} - -type ListGrantsResponse struct { - _ struct{} `type:"structure"` - - // A list of grants. 
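The list operations being removed all paginate through the same Marker / NextMarker / Truncated triple documented in these structs. A minimal sketch of that loop against the old v1 client, using ListGrants, follows; the client construction and the key ARN are assumptions, not part of this hunk.

```
// Illustrative sketch only: manual pagination of ListGrants with the
// removed v1 client. The key ARN is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyARN := "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // placeholder

	var marker *string
	for {
		page, err := svc.ListGrants(&kms.ListGrantsInput{
			KeyId:  aws.String(keyARN),
			Limit:  aws.Int64(50),
			Marker: marker, // nil on the first request
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range page.Grants {
			fmt.Println(aws.StringValue(g.GrantId), aws.StringValue(g.GranteePrincipal))
		}
		// Truncated signals more results; NextMarker seeds the next request.
		if !aws.BoolValue(page.Truncated) {
			break
		}
		marker = page.NextMarker
	}
}
```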
- Grants []*GrantListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsResponse) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *ListGrantsResponse) SetGrants(v []*GrantListEntry) *ListGrantsResponse { - s.Grants = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListGrantsResponse) SetNextMarker(v string) *ListGrantsResponse { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListGrantsResponse) SetTruncated(v bool) *ListGrantsResponse { - s.Truncated = &v - return s -} - -type ListKeyPoliciesInput struct { - _ struct{} `type:"structure"` - - // Gets the names of key policies for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 1000, inclusive. If you do not include a value, it defaults to 100. - // - // Only one policy can be attached to a key. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ListKeyPoliciesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListKeyPoliciesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListKeyPoliciesInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListKeyPoliciesInput) SetKeyId(v string) *ListKeyPoliciesInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListKeyPoliciesInput) SetLimit(v int64) *ListKeyPoliciesInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListKeyPoliciesInput) SetMarker(v string) *ListKeyPoliciesInput { - s.Marker = &v - return s -} - -type ListKeyPoliciesOutput struct { - _ struct{} `type:"structure"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A list of key policy names. The only valid value is default. - PolicyNames []*string `type:"list"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesOutput) GoString() string { - return s.String() -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListKeyPoliciesOutput) SetNextMarker(v string) *ListKeyPoliciesOutput { - s.NextMarker = &v - return s -} - -// SetPolicyNames sets the PolicyNames field's value. -func (s *ListKeyPoliciesOutput) SetPolicyNames(v []*string) *ListKeyPoliciesOutput { - s.PolicyNames = v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListKeyPoliciesOutput) SetTruncated(v bool) *ListKeyPoliciesOutput { - s.Truncated = &v - return s -} - -type ListKeysInput struct { - _ struct{} `type:"structure"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 1000, inclusive. If you do not include a value, it defaults to 100. 
- Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListKeysInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListKeysInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListKeysInput) SetLimit(v int64) *ListKeysInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListKeysInput) SetMarker(v string) *ListKeysInput { - s.Marker = &v - return s -} - -type ListKeysOutput struct { - _ struct{} `type:"structure"` - - // A list of KMS keys. - Keys []*KeyListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysOutput) GoString() string { - return s.String() -} - -// SetKeys sets the Keys field's value. -func (s *ListKeysOutput) SetKeys(v []*KeyListEntry) *ListKeysOutput { - s.Keys = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListKeysOutput) SetNextMarker(v string) *ListKeysOutput { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListKeysOutput) SetTruncated(v bool) *ListKeysOutput { - s.Truncated = &v - return s -} - -type ListResourceTagsInput struct { - _ struct{} `type:"structure"` - - // Gets tags on the specified KMS key. 
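Instead of driving Marker and Truncated by hand, callers of the old client usually let the generated Pages helper do the bookkeeping. The sketch below assumes the `ListKeysPages` helper from the wider vendored v1 package, which is not part of this hunk.

```
// Illustrative sketch only: the v1 SDK's generated paginator for ListKeys.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	err := svc.ListKeysPages(&kms.ListKeysInput{Limit: aws.Int64(100)},
		func(page *kms.ListKeysOutput, lastPage bool) bool {
			for _, k := range page.Keys {
				fmt.Println(aws.StringValue(k.KeyId), aws.StringValue(k.KeyArn))
			}
			return true // keep paging until lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}
```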
- // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 50, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - // - // Do not attempt to construct this value. Use only the value of NextMarker - // from the truncated response you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListResourceTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResourceTagsInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListResourceTagsInput) SetKeyId(v string) *ListResourceTagsInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListResourceTagsInput) SetLimit(v int64) *ListResourceTagsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListResourceTagsInput) SetMarker(v string) *ListResourceTagsInput { - s.Marker = &v - return s -} - -type ListResourceTagsOutput struct { - _ struct{} `type:"structure"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - // - // Do not assume or infer any information from this value. - NextMarker *string `min:"1" type:"string"` - - // A list of tags. Each tag consists of a tag key and a tag value. - // - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. 
- // For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) - // in the Key Management Service Developer Guide. - Tags []*Tag `type:"list"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsOutput) GoString() string { - return s.String() -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListResourceTagsOutput) SetNextMarker(v string) *ListResourceTagsOutput { - s.NextMarker = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ListResourceTagsOutput) SetTags(v []*Tag) *ListResourceTagsOutput { - s.Tags = v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListResourceTagsOutput) SetTruncated(v bool) *ListResourceTagsOutput { - s.Truncated = &v - return s -} - -type ListRetirableGrantsInput struct { - _ struct{} `type:"structure"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` - - // The retiring principal for which to list grants. Enter a principal in your - // Amazon Web Services account. - // - // To specify the retiring principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, federated users, - // and assumed role users. For examples of the ARN syntax for specifying a principal, - // see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // RetiringPrincipal is a required field - RetiringPrincipal *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ListRetirableGrantsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListRetirableGrantsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListRetirableGrantsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRetirableGrantsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - if s.RetiringPrincipal == nil { - invalidParams.Add(request.NewErrParamRequired("RetiringPrincipal")) - } - if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListRetirableGrantsInput) SetLimit(v int64) *ListRetirableGrantsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListRetirableGrantsInput) SetMarker(v string) *ListRetirableGrantsInput { - s.Marker = &v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *ListRetirableGrantsInput) SetRetiringPrincipal(v string) *ListRetirableGrantsInput { - s.RetiringPrincipal = &v - return s -} - -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MalformedPolicyDocumentException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MalformedPolicyDocumentException) GoString() string { - return s.String() -} - -func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { - return &MalformedPolicyDocumentException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *MalformedPolicyDocumentException) Code() string { - return "MalformedPolicyDocumentException" -} - -// Message returns the exception's message. -func (s *MalformedPolicyDocumentException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *MalformedPolicyDocumentException) OrigErr() error { - return nil -} - -func (s *MalformedPolicyDocumentException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. 
-func (s *MalformedPolicyDocumentException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *MalformedPolicyDocumentException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Describes the configuration of this multi-Region key. This field appears -// only when the KMS key is a primary or replica of a multi-Region key. -// -// For more information about any listed KMS key, use the DescribeKey operation. -type MultiRegionConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates whether the KMS key is a PRIMARY or REPLICA key. - MultiRegionKeyType *string `type:"string" enum:"MultiRegionKeyType"` - - // Displays the key ARN and Region of the primary key. This field includes the - // current KMS key if it is the primary key. - PrimaryKey *MultiRegionKey `type:"structure"` - - // displays the key ARNs and Regions of all replica keys. This field includes - // the current KMS key if it is a replica key. - ReplicaKeys []*MultiRegionKey `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionConfiguration) GoString() string { - return s.String() -} - -// SetMultiRegionKeyType sets the MultiRegionKeyType field's value. -func (s *MultiRegionConfiguration) SetMultiRegionKeyType(v string) *MultiRegionConfiguration { - s.MultiRegionKeyType = &v - return s -} - -// SetPrimaryKey sets the PrimaryKey field's value. -func (s *MultiRegionConfiguration) SetPrimaryKey(v *MultiRegionKey) *MultiRegionConfiguration { - s.PrimaryKey = v - return s -} - -// SetReplicaKeys sets the ReplicaKeys field's value. -func (s *MultiRegionConfiguration) SetReplicaKeys(v []*MultiRegionKey) *MultiRegionConfiguration { - s.ReplicaKeys = v - return s -} - -// Describes the primary or replica key in a multi-Region key. -type MultiRegionKey struct { - _ struct{} `type:"structure"` - - // Displays the key ARN of a primary or replica key of a multi-Region key. - Arn *string `min:"20" type:"string"` - - // Displays the Amazon Web Services Region of a primary or replica key in a - // multi-Region key. - Region *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionKey) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionKey) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. 
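The MultiRegionConfiguration and MultiRegionKey types defined above surface only through DescribeKey. Here is a minimal sketch of walking them with the old v1 client; the `DescribeKey` call and the mrk- key ID are assumptions drawn from the wider removed package.

```
// Illustrative sketch only: reading the multi-Region configuration of a key
// from a DescribeKey response of the removed v1 client.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.DescribeKey(&kms.DescribeKeyInput{
		KeyId: aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg := out.KeyMetadata.MultiRegionConfiguration
	if cfg == nil {
		fmt.Println("not a multi-Region key")
		return
	}
	fmt.Println("type:", aws.StringValue(cfg.MultiRegionKeyType))
	if cfg.PrimaryKey != nil {
		fmt.Println("primary:", aws.StringValue(cfg.PrimaryKey.Arn), aws.StringValue(cfg.PrimaryKey.Region))
	}
	for _, r := range cfg.ReplicaKeys {
		fmt.Println("replica:", aws.StringValue(r.Arn), aws.StringValue(r.Region))
	}
}
```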
-func (s *MultiRegionKey) SetArn(v string) *MultiRegionKey { - s.Arn = &v - return s -} - -// SetRegion sets the Region field's value. -func (s *MultiRegionKey) SetRegion(v string) *MultiRegionKey { - s.Region = &v - return s -} - -// The request was rejected because the specified entity or resource could not -// be found. -type NotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotFoundException) GoString() string { - return s.String() -} - -func newErrorNotFoundException(v protocol.ResponseMetadata) error { - return &NotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *NotFoundException) Code() string { - return "NotFoundException" -} - -// Message returns the exception's message. -func (s *NotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *NotFoundException) OrigErr() error { - return nil -} - -func (s *NotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *NotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *NotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -type PutKeyPolicyInput struct { - _ struct{} `type:"structure"` - - // A flag to indicate whether to bypass the key policy lockout safety check. - // - // Setting this value to true increases the risk that the KMS key becomes unmanageable. - // Do not set this value to true indiscriminately. - // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. - // - // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. - // - // The default value is false. - BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` - - // Sets the key policy on the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The key policy to attach to the KMS key. 
- // - // The key policy must meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must allow the principal that is making the PutKeyPolicy request to make - // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk - // that the KMS key becomes unmanageable. For more information, refer to - // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide. - // - // * Each statement in the key policy must contain one or more principals. - // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Amazon Web Services Identity and Access Management User Guide. - // - // The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, - // see Resource Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html) - // in the Key Management Service Developer Guide. - // - // Policy is a required field - Policy *string `min:"1" type:"string" required:"true"` - - // The name of the key policy. The only valid value is default. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutKeyPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutKeyPolicyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value. 
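For context on the PutKeyPolicyInput fields above, this is a minimal sketch of attaching the default-named policy with the removed v1 client. The key ID and the policy document are placeholders; as the notes above describe, the policy must keep the caller able to call PutKeyPolicy unless BypassPolicyLockoutSafetyCheck is set.

```
// Illustrative sketch only: PutKeyPolicy against the removed v1 client.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Placeholder policy document; real policies must satisfy the criteria
	// documented above (principals visible to KMS, size under 32 KiB, etc.).
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "Enable IAM policies",
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "kms:*",
	    "Resource": "*"
	  }]
	}`

	_, err := svc.PutKeyPolicy(&kms.PutKeyPolicyInput{
		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		PolicyName: aws.String("default"),                              // only valid value
		Policy:     aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```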
-func (s *PutKeyPolicyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *PutKeyPolicyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *PutKeyPolicyInput) SetKeyId(v string) *PutKeyPolicyInput { - s.KeyId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *PutKeyPolicyInput) SetPolicy(v string) *PutKeyPolicyInput { - s.Policy = &v - return s -} - -// SetPolicyName sets the PolicyName field's value. -func (s *PutKeyPolicyInput) SetPolicyName(v string) *PutKeyPolicyInput { - s.PolicyName = &v - return s -} - -type PutKeyPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyOutput) GoString() string { - return s.String() -} - -type ReEncryptInput struct { - _ struct{} `type:"structure"` - - // Ciphertext of the data to reencrypt. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - // - // CiphertextBlob is a required field - CiphertextBlob []byte `min:"1" type:"blob" required:"true"` - - // Specifies the encryption algorithm that KMS will use to reecrypt the data - // after it has decrypted it. The default value, SYMMETRIC_DEFAULT, represents - // the encryption algorithm used for symmetric KMS keys. - // - // This parameter is required only when the destination KMS key is an asymmetric - // KMS key. - DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies that encryption context to use when the reencrypting the data. - // - // A destination encryption context is valid only when the destination KMS key - // is a symmetric KMS key. The standard ciphertext format for asymmetric KMS - // keys does not include fields for metadata. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - DestinationEncryptionContext map[string]*string `type:"map"` - - // A unique identifier for the KMS key that is used to reencrypt the data. Specify - // a symmetric or asymmetric KMS key with a KeyUsage value of ENCRYPT_DECRYPT. - // To find the KeyUsage value of a KMS key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // DestinationKeyId is a required field - DestinationKeyId *string `min:"1" type:"string" required:"true"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext - // before it is reencrypted. The default value, SYMMETRIC_DEFAULT, represents - // the algorithm used for symmetric KMS keys. - // - // Specify the same algorithm that was used to encrypt the ciphertext. If you - // specify a different algorithm, the decrypt attempt fails. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. - SourceEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context to use to decrypt the ciphertext. Enter - // the same encryption context that was used to encrypt the ciphertext. - // - // An encryption context is a collection of non-secret key-value pairs that - // represents additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is optional - // when encrypting with a symmetric KMS key, but it is highly recommended. - // - // For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - SourceEncryptionContext map[string]*string `type:"map"` - - // Specifies the KMS key that KMS will use to decrypt the ciphertext before - // it is re-encrypted. Enter a key ID of the KMS key that was used to encrypt - // the ciphertext. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. If you used a symmetric KMS key, KMS can get the KMS - // key from metadata that it adds to the symmetric ciphertext blob. However, - // it is always recommended as a best practice. This practice ensures that you - // use the KMS key that you intend. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - SourceKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReEncryptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReEncryptInput"} - if s.CiphertextBlob == nil { - invalidParams.Add(request.NewErrParamRequired("CiphertextBlob")) - } - if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1)) - } - if s.DestinationKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationKeyId")) - } - if s.DestinationKeyId != nil && len(*s.DestinationKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationKeyId", 1)) - } - if s.SourceKeyId != nil && len(*s.SourceKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SourceKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *ReEncryptInput) SetCiphertextBlob(v []byte) *ReEncryptInput { - s.CiphertextBlob = v - return s -} - -// SetDestinationEncryptionAlgorithm sets the DestinationEncryptionAlgorithm field's value. -func (s *ReEncryptInput) SetDestinationEncryptionAlgorithm(v string) *ReEncryptInput { - s.DestinationEncryptionAlgorithm = &v - return s -} - -// SetDestinationEncryptionContext sets the DestinationEncryptionContext field's value. -func (s *ReEncryptInput) SetDestinationEncryptionContext(v map[string]*string) *ReEncryptInput { - s.DestinationEncryptionContext = v - return s -} - -// SetDestinationKeyId sets the DestinationKeyId field's value. -func (s *ReEncryptInput) SetDestinationKeyId(v string) *ReEncryptInput { - s.DestinationKeyId = &v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *ReEncryptInput) SetGrantTokens(v []*string) *ReEncryptInput { - s.GrantTokens = v - return s -} - -// SetSourceEncryptionAlgorithm sets the SourceEncryptionAlgorithm field's value. -func (s *ReEncryptInput) SetSourceEncryptionAlgorithm(v string) *ReEncryptInput { - s.SourceEncryptionAlgorithm = &v - return s -} - -// SetSourceEncryptionContext sets the SourceEncryptionContext field's value. -func (s *ReEncryptInput) SetSourceEncryptionContext(v map[string]*string) *ReEncryptInput { - s.SourceEncryptionContext = v - return s -} - -// SetSourceKeyId sets the SourceKeyId field's value. 
-func (s *ReEncryptInput) SetSourceKeyId(v string) *ReEncryptInput { - s.SourceKeyId = &v - return s -} - -type ReEncryptOutput struct { - _ struct{} `type:"structure"` - - // The reencrypted data. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The encryption algorithm that was used to reencrypt the data. - DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to reencrypt the data. - KeyId *string `min:"1" type:"string"` - - // The encryption algorithm that was used to decrypt the ciphertext before it - // was reencrypted. - SourceEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Unique identifier of the KMS key used to originally encrypt the data. - SourceKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *ReEncryptOutput) SetCiphertextBlob(v []byte) *ReEncryptOutput { - s.CiphertextBlob = v - return s -} - -// SetDestinationEncryptionAlgorithm sets the DestinationEncryptionAlgorithm field's value. -func (s *ReEncryptOutput) SetDestinationEncryptionAlgorithm(v string) *ReEncryptOutput { - s.DestinationEncryptionAlgorithm = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ReEncryptOutput) SetKeyId(v string) *ReEncryptOutput { - s.KeyId = &v - return s -} - -// SetSourceEncryptionAlgorithm sets the SourceEncryptionAlgorithm field's value. -func (s *ReEncryptOutput) SetSourceEncryptionAlgorithm(v string) *ReEncryptOutput { - s.SourceEncryptionAlgorithm = &v - return s -} - -// SetSourceKeyId sets the SourceKeyId field's value. -func (s *ReEncryptOutput) SetSourceKeyId(v string) *ReEncryptOutput { - s.SourceKeyId = &v - return s -} - -type ReplicateKeyInput struct { - _ struct{} `type:"structure"` - - // A flag to indicate whether to bypass the key policy lockout safety check. - // - // Setting this value to true increases the risk that the KMS key becomes unmanageable. - // Do not set this value to true indiscriminately. - // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. - // - // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. - // - // The default value is false. 
- BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` - - // A description of the KMS key. The default value is an empty string (no description). - // - // The description is not a shared property of multi-Region keys. You can specify - // the same description or a different description for each key in a set of - // related multi-Region keys. KMS does not synchronize this property. - Description *string `type:"string"` - - // Identifies the multi-Region primary key that is being replicated. To determine - // whether a KMS key is a multi-Region primary key, use the DescribeKey operation - // to check the value of the MultiRegionKeyType property. - // - // Specify the key ID or key ARN of a multi-Region primary key. - // - // For example: - // - // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The key policy to attach to the KMS key. This parameter is optional. If you - // do not provide a key policy, KMS attaches the default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) - // to the KMS key. - // - // The key policy is not a shared property of multi-Region keys. You can specify - // the same key policy or a different key policy for each key in a set of related - // multi-Region keys. KMS does not synchronize this property. - // - // If you provide a key policy, it must meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must give the caller kms:PutKeyPolicy permission on the replica key. This - // reduces the risk that the KMS key becomes unmanageable. For more information, - // refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . - // - // * Each statement in the key policy must contain one or more principals. - // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Identity and Access Management User Guide . - // - // * The key policy size quota is 32 kilobytes (32768 bytes). - Policy *string `min:"1" type:"string"` - - // The Region ID of the Amazon Web Services Region for this replica key. - // - // Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon - // Web Services Regions in which KMS is supported, see KMS service endpoints - // (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the - // Amazon Web Services General Reference. - // - // The replica must be in a different Amazon Web Services Region than its primary - // key and other replicas of that primary key, but in the same Amazon Web Services - // partition. KMS must be available in the replica Region. 
If the Region is - // not enabled by default, the Amazon Web Services account must be enabled in - // the Region. - // - // For information about Amazon Web Services partitions, see Amazon Resource - // Names (ARNs) in the Amazon Web Services General Reference. (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // For information about enabling and disabling Regions, see Enabling a Region - // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) - // and Disabling a Region (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) - // in the Amazon Web Services General Reference. - // - // ReplicaRegion is a required field - ReplicaRegion *string `min:"1" type:"string" required:"true"` - - // Assigns one or more tags to the replica key. Use this parameter to tag the - // KMS key when it is created. To tag an existing KMS key, use the TagResource - // operation. - // - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. - // For details, see Using ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) - // in the Key Management Service Developer Guide. - // - // To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) - // permission in an IAM policy. - // - // Tags are not a shared property of multi-Region keys. You can specify the - // same tags or different tags for each key in a set of related multi-Region - // keys. KMS does not synchronize this property. - // - // Each tag consists of a tag key and a tag value. Both the tag key and the - // tag value are required, but the tag value can be an empty (null) string. - // You cannot have more than one tag on a KMS key with the same tag key. If - // you specify an existing tag key with a different tag value, KMS replaces - // the current tag value with the specified one. - // - // When you add tags to an Amazon Web Services resource, Amazon Web Services - // generates a cost allocation report with usage and costs aggregated by tags. - // Tags can also be used to control access to a KMS key. For details, see Tagging - // Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ReplicateKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicateKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ReplicaRegion == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicaRegion")) - } - if s.ReplicaRegion != nil && len(*s.ReplicaRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaRegion", 1)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value. -func (s *ReplicateKeyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *ReplicateKeyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ReplicateKeyInput) SetDescription(v string) *ReplicateKeyInput { - s.Description = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ReplicateKeyInput) SetKeyId(v string) *ReplicateKeyInput { - s.KeyId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *ReplicateKeyInput) SetPolicy(v string) *ReplicateKeyInput { - s.Policy = &v - return s -} - -// SetReplicaRegion sets the ReplicaRegion field's value. -func (s *ReplicateKeyInput) SetReplicaRegion(v string) *ReplicateKeyInput { - s.ReplicaRegion = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ReplicateKeyInput) SetTags(v []*Tag) *ReplicateKeyInput { - s.Tags = v - return s -} - -type ReplicateKeyOutput struct { - _ struct{} `type:"structure"` - - // Displays details about the new replica key, including its Amazon Resource - // Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // and key state (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). - // It also includes the ARN and Amazon Web Services Region of its primary key - // and other replica keys. - ReplicaKeyMetadata *KeyMetadata `type:"structure"` - - // The key policy of the new replica key. The value is a key policy document - // in JSON format. - ReplicaPolicy *string `min:"1" type:"string"` - - // The tags on the new replica key. The value is a list of tag key and tag value - // pairs. - ReplicaTags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyOutput) GoString() string { - return s.String() -} - -// SetReplicaKeyMetadata sets the ReplicaKeyMetadata field's value. 
-func (s *ReplicateKeyOutput) SetReplicaKeyMetadata(v *KeyMetadata) *ReplicateKeyOutput { - s.ReplicaKeyMetadata = v - return s -} - -// SetReplicaPolicy sets the ReplicaPolicy field's value. -func (s *ReplicateKeyOutput) SetReplicaPolicy(v string) *ReplicateKeyOutput { - s.ReplicaPolicy = &v - return s -} - -// SetReplicaTags sets the ReplicaTags field's value. -func (s *ReplicateKeyOutput) SetReplicaTags(v []*Tag) *ReplicateKeyOutput { - s.ReplicaTags = v - return s -} - -type RetireGrantInput struct { - _ struct{} `type:"structure"` - - // Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants, - // or ListRetirableGrants. - // - // * Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 - GrantId *string `min:"1" type:"string"` - - // Identifies the grant to be retired. You can use a grant token to identify - // a new grant even before it has achieved eventual consistency. - // - // Only the CreateGrant operation returns a grant token. For details, see Grant - // token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) - // in the Key Management Service Developer Guide. - GrantToken *string `min:"1" type:"string"` - - // The key ARN KMS key associated with the grant. To find the key ARN, use the - // ListKeys operation. - // - // For example: arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RetireGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RetireGrantInput"} - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.GrantToken != nil && len(*s.GrantToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantToken", 1)) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *RetireGrantInput) SetGrantId(v string) *RetireGrantInput { - s.GrantId = &v - return s -} - -// SetGrantToken sets the GrantToken field's value. -func (s *RetireGrantInput) SetGrantToken(v string) *RetireGrantInput { - s.GrantToken = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *RetireGrantInput) SetKeyId(v string) *RetireGrantInput { - s.KeyId = &v - return s -} - -type RetireGrantOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantOutput) GoString() string { - return s.String() -} - -type RevokeGrantInput struct { - _ struct{} `type:"structure"` - - // Identifies the grant to revoke. To get the grant ID, use CreateGrant, ListGrants, - // or ListRetirableGrants. - // - // GrantId is a required field - GrantId *string `min:"1" type:"string" required:"true"` - - // A unique identifier for the KMS key associated with the grant. To get the - // key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RevokeGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RevokeGrantInput"} - if s.GrantId == nil { - invalidParams.Add(request.NewErrParamRequired("GrantId")) - } - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *RevokeGrantInput) SetGrantId(v string) *RevokeGrantInput { - s.GrantId = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *RevokeGrantInput) SetKeyId(v string) *RevokeGrantInput { - s.KeyId = &v - return s -} - -type RevokeGrantOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s RevokeGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantOutput) GoString() string { - return s.String() -} - -type ScheduleKeyDeletionInput struct { - _ struct{} `type:"structure"` - - // The unique identifier of the KMS key to delete. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The waiting period, specified in number of days. After the waiting period - // ends, KMS deletes the KMS key. - // - // If the KMS key is a multi-Region primary key with replicas, the waiting period - // begins when the last of its replica keys is deleted. Otherwise, the waiting - // period begins immediately. - // - // This value is optional. If you include a value, it must be between 7 and - // 30, inclusive. If you do not include a value, it defaults to 30. - PendingWindowInDays *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ScheduleKeyDeletionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ScheduleKeyDeletionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PendingWindowInDays != nil && *s.PendingWindowInDays < 1 { - invalidParams.Add(request.NewErrParamMinValue("PendingWindowInDays", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ScheduleKeyDeletionInput) SetKeyId(v string) *ScheduleKeyDeletionInput { - s.KeyId = &v - return s -} - -// SetPendingWindowInDays sets the PendingWindowInDays field's value. -func (s *ScheduleKeyDeletionInput) SetPendingWindowInDays(v int64) *ScheduleKeyDeletionInput { - s.PendingWindowInDays = &v - return s -} - -type ScheduleKeyDeletionOutput struct { - _ struct{} `type:"structure"` - - // The date and time after which KMS deletes the KMS key. - // - // If the KMS key is a multi-Region primary key with replica keys, this field - // does not appear. The deletion date for the primary key isn't known until - // its last replica key is deleted. 
- DeletionDate *time.Time `type:"timestamp"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is scheduled. - KeyId *string `min:"1" type:"string"` - - // The current status of the KMS key. - // - // For more information about how key state affects the use of a KMS key, see - // Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. - KeyState *string `type:"string" enum:"KeyState"` - - // The waiting period before the KMS key is deleted. - // - // If the KMS key is a multi-Region primary key with replicas, the waiting period - // begins when the last of its replica keys is deleted. Otherwise, the waiting - // period begins immediately. - PendingWindowInDays *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionOutput) GoString() string { - return s.String() -} - -// SetDeletionDate sets the DeletionDate field's value. -func (s *ScheduleKeyDeletionOutput) SetDeletionDate(v time.Time) *ScheduleKeyDeletionOutput { - s.DeletionDate = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ScheduleKeyDeletionOutput) SetKeyId(v string) *ScheduleKeyDeletionOutput { - s.KeyId = &v - return s -} - -// SetKeyState sets the KeyState field's value. -func (s *ScheduleKeyDeletionOutput) SetKeyState(v string) *ScheduleKeyDeletionOutput { - s.KeyState = &v - return s -} - -// SetPendingWindowInDays sets the PendingWindowInDays field's value. -func (s *ScheduleKeyDeletionOutput) SetPendingWindowInDays(v int64) *ScheduleKeyDeletionOutput { - s.PendingWindowInDays = &v - return s -} - -type SignInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric - // KMS key to sign the message. The KeyUsage type of the KMS key must be SIGN_VERIFY. - // To find the KeyUsage of a KMS key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. - // To sign a larger message, provide the message digest. - // - // If you provide a message, KMS generates a hash digest of the message and - // then signs it. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by SignInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` - - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. - MessageType *string `type:"string" enum:"MessageType"` - - // Specifies the signing algorithm to use when signing the message. - // - // Choose an algorithm that is compatible with the type and size of the specified - // asymmetric KMS key. - // - // SigningAlgorithm is a required field - SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SignInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SignInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - if s.SigningAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SigningAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *SignInput) SetGrantTokens(v []*string) *SignInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *SignInput) SetKeyId(v string) *SignInput { - s.KeyId = &v - return s -} - -// SetMessage sets the Message field's value. 
-func (s *SignInput) SetMessage(v []byte) *SignInput { - s.Message = v - return s -} - -// SetMessageType sets the MessageType field's value. -func (s *SignInput) SetMessageType(v string) *SignInput { - s.MessageType = &v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *SignInput) SetSigningAlgorithm(v string) *SignInput { - s.SigningAlgorithm = &v - return s -} - -type SignOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to sign the message. - KeyId *string `min:"1" type:"string"` - - // The cryptographic signature that was generated for the message. - // - // * When used with the supported RSA signing algorithms, the encoding of - // this value is defined by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). - // - // * When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing - // algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005 - // and RFC 3279 Section 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). - // This is the most commonly used signature format and is appropriate for - // most uses. - // - // When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. - // Otherwise, it is not Base64-encoded. - // Signature is automatically base64 encoded/decoded by the SDK. - Signature []byte `min:"1" type:"blob"` - - // The signing algorithm that was used to sign the message. - SigningAlgorithm *string `type:"string" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *SignOutput) SetKeyId(v string) *SignOutput { - s.KeyId = &v - return s -} - -// SetSignature sets the Signature field's value. -func (s *SignOutput) SetSignature(v []byte) *SignOutput { - s.Signature = v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *SignOutput) SetSigningAlgorithm(v string) *SignOutput { - s.SigningAlgorithm = &v - return s -} - -// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and -// tag values are both required, but tag values can be empty (null) strings. -// -// For information about the rules that apply to tag keys and tag values, see -// User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// in the Amazon Web Services Billing and Cost Management User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key of the tag. - // - // TagKey is a required field - TagKey *string `min:"1" type:"string" required:"true"` - - // The value of the tag. - // - // TagValue is a required field - TagValue *string `type:"string" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.TagKey == nil { - invalidParams.Add(request.NewErrParamRequired("TagKey")) - } - if s.TagKey != nil && len(*s.TagKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TagKey", 1)) - } - if s.TagValue == nil { - invalidParams.Add(request.NewErrParamRequired("TagValue")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTagKey sets the TagKey field's value. -func (s *Tag) SetTagKey(v string) *Tag { - s.TagKey = &v - return s -} - -// SetTagValue sets the TagValue field's value. -func (s *Tag) SetTagValue(v string) *Tag { - s.TagValue = &v - return s -} - -// The request was rejected because one or more tags are not valid. -type TagException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagException) GoString() string { - return s.String() -} - -func newErrorTagException(v protocol.ResponseMetadata) error { - return &TagException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TagException) Code() string { - return "TagException" -} - -// Message returns the exception's message. -func (s *TagException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TagException) OrigErr() error { - return nil -} - -func (s *TagException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TagException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TagException) RequestID() string { - return s.RespMetadata.RequestID -} - -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // Identifies a customer managed key in the account and Region. - // - // Specify the key ID or key ARN of the KMS key. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // One or more tags. - // - // Each tag consists of a tag key and a tag value. The tag value can be an empty - // (null) string. - // - // You cannot have more than one tag on a KMS key with the same tag key. If - // you specify an existing tag key with a different tag value, KMS replaces - // the current tag value with the specified one. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *TagResourceInput) SetKeyId(v string) *TagResourceInput { - s.KeyId = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -type TagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. 
-type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsupportedOperationException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsupportedOperationException) GoString() string { - return s.String() -} - -func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { - return &UnsupportedOperationException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *UnsupportedOperationException) Code() string { - return "UnsupportedOperationException" -} - -// Message returns the exception's message. -func (s *UnsupportedOperationException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *UnsupportedOperationException) OrigErr() error { - return nil -} - -func (s *UnsupportedOperationException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *UnsupportedOperationException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *UnsupportedOperationException) RequestID() string { - return s.RespMetadata.RequestID -} - -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key from which you are removing tags. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // One or more tag keys. Specify only the tag keys, not the tag values. - // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *UntagResourceInput) SetKeyId(v string) *UntagResourceInput { - s.KeyId = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v - return s -} - -type UntagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -type UpdateAliasInput struct { - _ struct{} `type:"structure"` - - // Identifies the alias that is changing its KMS key. This value must begin - // with alias/ followed by the alias name, such as alias/ExampleAlias. You cannot - // use UpdateAlias to change the alias name. - // - // AliasName is a required field - AliasName *string `min:"1" type:"string" required:"true"` - - // Identifies the customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) - // to associate with the alias. You don't have permission to associate an alias - // with an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). - // - // The KMS key must be in the same Amazon Web Services account and Region as - // the alias. Also, the new target KMS key must be the same type as the current - // target KMS key (both symmetric or both asymmetric) and they must have the - // same key usage. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // To verify that the alias is mapped to the correct KMS key, use ListAliases. - // - // TargetKeyId is a required field - TargetKeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.TargetKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) - } - if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasName sets the AliasName field's value. -func (s *UpdateAliasInput) SetAliasName(v string) *UpdateAliasInput { - s.AliasName = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *UpdateAliasInput) SetTargetKeyId(v string) *UpdateAliasInput { - s.TargetKeyId = &v - return s -} - -type UpdateAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasOutput) GoString() string { - return s.String() -} - -type UpdateCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Associates the custom key store with a related CloudHSM cluster. - // - // Enter the cluster ID of the cluster that you used to create the custom key - // store or a cluster that shares a backup history and has the same cluster - // certificate as the original cluster. You cannot use this parameter to associate - // a custom key store with an unrelated cluster. In addition, the replacement - // cluster must fulfill the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) - // for a cluster associated with a custom key store. To view the cluster certificate - // of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - CloudHsmClusterId *string `min:"19" type:"string"` - - // Identifies the custom key store that you want to update. Enter the ID of - // the custom key store. To find the ID of a custom key store, use the DescribeCustomKeyStores - // operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` - - // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM - // cluster that is associated with the custom key store. - // - // This parameter tells KMS the current password of the kmsuser crypto user - // (CU). It does not set or change the password of any users in the CloudHSM - // cluster. 
- // - // KeyStorePassword is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UpdateCustomKeyStoreInput's - // String and GoString methods. - KeyStorePassword *string `min:"7" type:"string" sensitive:"true"` - - // Changes the friendly name of the custom key store to the value that you specify. - // The custom key store name must be unique in the Amazon Web Services account. - NewCustomKeyStoreName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateCustomKeyStoreInput"} - if s.CloudHsmClusterId != nil && len(*s.CloudHsmClusterId) < 19 { - invalidParams.Add(request.NewErrParamMinLen("CloudHsmClusterId", 19)) - } - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) - } - if s.NewCustomKeyStoreName != nil && len(*s.NewCustomKeyStoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NewCustomKeyStoreName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *UpdateCustomKeyStoreInput) SetCloudHsmClusterId(v string) *UpdateCustomKeyStoreInput { - s.CloudHsmClusterId = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *UpdateCustomKeyStoreInput) SetCustomKeyStoreId(v string) *UpdateCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -// SetKeyStorePassword sets the KeyStorePassword field's value. -func (s *UpdateCustomKeyStoreInput) SetKeyStorePassword(v string) *UpdateCustomKeyStoreInput { - s.KeyStorePassword = &v - return s -} - -// SetNewCustomKeyStoreName sets the NewCustomKeyStoreName field's value. -func (s *UpdateCustomKeyStoreInput) SetNewCustomKeyStoreName(v string) *UpdateCustomKeyStoreInput { - s.NewCustomKeyStoreName = &v - return s -} - -type UpdateCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type UpdateKeyDescriptionInput struct { - _ struct{} `type:"structure"` - - // New description for the KMS key. - // - // Description is a required field - Description *string `type:"string" required:"true"` - - // Updates the description of the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateKeyDescriptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateKeyDescriptionInput"} - if s.Description == nil { - invalidParams.Add(request.NewErrParamRequired("Description")) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateKeyDescriptionInput) SetDescription(v string) *UpdateKeyDescriptionInput { - s.Description = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *UpdateKeyDescriptionInput) SetKeyId(v string) *UpdateKeyDescriptionInput { - s.KeyId = &v - return s -} - -type UpdateKeyDescriptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionOutput) GoString() string { - return s.String() -} - -type UpdatePrimaryRegionInput struct { - _ struct{} `type:"structure"` - - // Identifies the current primary key. When the operation completes, this KMS - // key will be a replica key. 
- // - // Specify the key ID or key ARN of a multi-Region primary key. - // - // For example: - // - // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The Amazon Web Services Region of the new primary key. Enter the Region ID, - // such as us-east-1 or ap-southeast-2. There must be an existing replica key - // in this Region. - // - // When the operation completes, the multi-Region key in this Region will be - // the primary key. - // - // PrimaryRegion is a required field - PrimaryRegion *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdatePrimaryRegionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdatePrimaryRegionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PrimaryRegion == nil { - invalidParams.Add(request.NewErrParamRequired("PrimaryRegion")) - } - if s.PrimaryRegion != nil && len(*s.PrimaryRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PrimaryRegion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *UpdatePrimaryRegionInput) SetKeyId(v string) *UpdatePrimaryRegionInput { - s.KeyId = &v - return s -} - -// SetPrimaryRegion sets the PrimaryRegion field's value. -func (s *UpdatePrimaryRegionInput) SetPrimaryRegion(v string) *UpdatePrimaryRegionInput { - s.PrimaryRegion = &v - return s -} - -type UpdatePrimaryRegionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionOutput) GoString() string { - return s.String() -} - -type VerifyInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the asymmetric KMS key that will be used to verify the signature. - // This must be the same KMS key that was used to generate the signature. If - // you specify a different KMS key, the signature verification fails. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the message that was signed. You can submit a raw message of up - // to 4096 bytes, or a hash digest of the message. If you submit a digest, use - // the MessageType parameter with a value of DIGEST. - // - // If the message specified here is different from the message that was signed, - // the signature verification fails. A message and its hash digest are considered - // to be the same message. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by VerifyInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` - - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. - // - // Use the DIGEST value only when the value of the Message parameter is a message - // digest. If you use the DIGEST value with a raw message, the security of the - // verification operation can be compromised. - MessageType *string `type:"string" enum:"MessageType"` - - // The signature that the Sign operation generated. - // Signature is automatically base64 encoded/decoded by the SDK. - // - // Signature is a required field - Signature []byte `min:"1" type:"blob" required:"true"` - - // The signing algorithm that was used to sign the message. If you submit a - // different algorithm, the signature verification fails. - // - // SigningAlgorithm is a required field - SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *VerifyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "VerifyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - if s.Signature == nil { - invalidParams.Add(request.NewErrParamRequired("Signature")) - } - if s.Signature != nil && len(s.Signature) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Signature", 1)) - } - if s.SigningAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SigningAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *VerifyInput) SetGrantTokens(v []*string) *VerifyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyInput) SetKeyId(v string) *VerifyInput { - s.KeyId = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *VerifyInput) SetMessage(v []byte) *VerifyInput { - s.Message = v - return s -} - -// SetMessageType sets the MessageType field's value. -func (s *VerifyInput) SetMessageType(v string) *VerifyInput { - s.MessageType = &v - return s -} - -// SetSignature sets the Signature field's value. -func (s *VerifyInput) SetSignature(v []byte) *VerifyInput { - s.Signature = v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *VerifyInput) SetSigningAlgorithm(v string) *VerifyInput { - s.SigningAlgorithm = &v - return s -} - -type VerifyOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to verify the signature. - KeyId *string `min:"1" type:"string"` - - // A Boolean value that indicates whether the signature was verified. A value - // of True indicates that the Signature was produced by signing the Message - // with the specified KeyID and SigningAlgorithm. If the signature is not verified, - // the Verify operation fails with a KMSInvalidSignatureException exception. - SignatureValid *bool `type:"boolean"` - - // The signing algorithm that was used to verify the signature. - SigningAlgorithm *string `type:"string" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s VerifyOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyOutput) SetKeyId(v string) *VerifyOutput { - s.KeyId = &v - return s -} - -// SetSignatureValid sets the SignatureValid field's value. -func (s *VerifyOutput) SetSignatureValid(v bool) *VerifyOutput { - s.SignatureValid = &v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *VerifyOutput) SetSigningAlgorithm(v string) *VerifyOutput { - s.SigningAlgorithm = &v - return s -} - -const ( - // AlgorithmSpecRsaesPkcs1V15 is a AlgorithmSpec enum value - AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5" - - // AlgorithmSpecRsaesOaepSha1 is a AlgorithmSpec enum value - AlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1" - - // AlgorithmSpecRsaesOaepSha256 is a AlgorithmSpec enum value - AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" -) - -// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum -func AlgorithmSpec_Values() []string { - return []string{ - AlgorithmSpecRsaesPkcs1V15, - AlgorithmSpecRsaesOaepSha1, - AlgorithmSpecRsaesOaepSha256, - } -} - -const ( - // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS" - - // ConnectionErrorCodeTypeClusterNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeClusterNotFound = "CLUSTER_NOT_FOUND" - - // ConnectionErrorCodeTypeNetworkErrors is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeNetworkErrors = "NETWORK_ERRORS" - - // ConnectionErrorCodeTypeInternalError is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInternalError = "INTERNAL_ERROR" - - // ConnectionErrorCodeTypeInsufficientCloudhsmHsms is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInsufficientCloudhsmHsms = "INSUFFICIENT_CLOUDHSM_HSMS" - - // ConnectionErrorCodeTypeUserLockedOut is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserLockedOut = "USER_LOCKED_OUT" - - // ConnectionErrorCodeTypeUserNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserNotFound = "USER_NOT_FOUND" - - // ConnectionErrorCodeTypeUserLoggedIn is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserLoggedIn = "USER_LOGGED_IN" - - // ConnectionErrorCodeTypeSubnetNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeSubnetNotFound = "SUBNET_NOT_FOUND" -) - -// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum -func ConnectionErrorCodeType_Values() []string { - return []string{ - ConnectionErrorCodeTypeInvalidCredentials, - ConnectionErrorCodeTypeClusterNotFound, - ConnectionErrorCodeTypeNetworkErrors, - ConnectionErrorCodeTypeInternalError, - ConnectionErrorCodeTypeInsufficientCloudhsmHsms, - ConnectionErrorCodeTypeUserLockedOut, - ConnectionErrorCodeTypeUserNotFound, - ConnectionErrorCodeTypeUserLoggedIn, - ConnectionErrorCodeTypeSubnetNotFound, - } -} - -const ( - // ConnectionStateTypeConnected is a ConnectionStateType enum value - ConnectionStateTypeConnected = "CONNECTED" - - // ConnectionStateTypeConnecting is a ConnectionStateType enum value - ConnectionStateTypeConnecting = "CONNECTING" - - // ConnectionStateTypeFailed is a ConnectionStateType enum value - ConnectionStateTypeFailed = "FAILED" - - // ConnectionStateTypeDisconnected is a ConnectionStateType enum value - ConnectionStateTypeDisconnected = "DISCONNECTED" - - // ConnectionStateTypeDisconnecting is a 
ConnectionStateType enum value - ConnectionStateTypeDisconnecting = "DISCONNECTING" -) - -// ConnectionStateType_Values returns all elements of the ConnectionStateType enum -func ConnectionStateType_Values() []string { - return []string{ - ConnectionStateTypeConnected, - ConnectionStateTypeConnecting, - ConnectionStateTypeFailed, - ConnectionStateTypeDisconnected, - ConnectionStateTypeDisconnecting, - } -} - -const ( - // CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa2048 = "RSA_2048" - - // CustomerMasterKeySpecRsa3072 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa3072 = "RSA_3072" - - // CustomerMasterKeySpecRsa4096 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa4096 = "RSA_4096" - - // CustomerMasterKeySpecEccNistP256 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP256 = "ECC_NIST_P256" - - // CustomerMasterKeySpecEccNistP384 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP384 = "ECC_NIST_P384" - - // CustomerMasterKeySpecEccNistP521 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP521 = "ECC_NIST_P521" - - // CustomerMasterKeySpecEccSecgP256k1 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccSecgP256k1 = "ECC_SECG_P256K1" - - // CustomerMasterKeySpecSymmetricDefault is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" -) - -// CustomerMasterKeySpec_Values returns all elements of the CustomerMasterKeySpec enum -func CustomerMasterKeySpec_Values() []string { - return []string{ - CustomerMasterKeySpecRsa2048, - CustomerMasterKeySpecRsa3072, - CustomerMasterKeySpecRsa4096, - CustomerMasterKeySpecEccNistP256, - CustomerMasterKeySpecEccNistP384, - CustomerMasterKeySpecEccNistP521, - CustomerMasterKeySpecEccSecgP256k1, - CustomerMasterKeySpecSymmetricDefault, - } -} - -const ( - // DataKeyPairSpecRsa2048 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa2048 = "RSA_2048" - - // DataKeyPairSpecRsa3072 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa3072 = "RSA_3072" - - // DataKeyPairSpecRsa4096 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa4096 = "RSA_4096" - - // DataKeyPairSpecEccNistP256 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP256 = "ECC_NIST_P256" - - // DataKeyPairSpecEccNistP384 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP384 = "ECC_NIST_P384" - - // DataKeyPairSpecEccNistP521 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP521 = "ECC_NIST_P521" - - // DataKeyPairSpecEccSecgP256k1 is a DataKeyPairSpec enum value - DataKeyPairSpecEccSecgP256k1 = "ECC_SECG_P256K1" -) - -// DataKeyPairSpec_Values returns all elements of the DataKeyPairSpec enum -func DataKeyPairSpec_Values() []string { - return []string{ - DataKeyPairSpecRsa2048, - DataKeyPairSpecRsa3072, - DataKeyPairSpecRsa4096, - DataKeyPairSpecEccNistP256, - DataKeyPairSpecEccNistP384, - DataKeyPairSpecEccNistP521, - DataKeyPairSpecEccSecgP256k1, - } -} - -const ( - // DataKeySpecAes256 is a DataKeySpec enum value - DataKeySpecAes256 = "AES_256" - - // DataKeySpecAes128 is a DataKeySpec enum value - DataKeySpecAes128 = "AES_128" -) - -// DataKeySpec_Values returns all elements of the DataKeySpec enum -func DataKeySpec_Values() []string { - return []string{ - DataKeySpecAes256, - DataKeySpecAes128, - } -} - -const ( - // EncryptionAlgorithmSpecSymmetricDefault is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecSymmetricDefault = "SYMMETRIC_DEFAULT" - - // 
EncryptionAlgorithmSpecRsaesOaepSha1 is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1" - - // EncryptionAlgorithmSpecRsaesOaepSha256 is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" -) - -// EncryptionAlgorithmSpec_Values returns all elements of the EncryptionAlgorithmSpec enum -func EncryptionAlgorithmSpec_Values() []string { - return []string{ - EncryptionAlgorithmSpecSymmetricDefault, - EncryptionAlgorithmSpecRsaesOaepSha1, - EncryptionAlgorithmSpecRsaesOaepSha256, - } -} - -const ( - // ExpirationModelTypeKeyMaterialExpires is a ExpirationModelType enum value - ExpirationModelTypeKeyMaterialExpires = "KEY_MATERIAL_EXPIRES" - - // ExpirationModelTypeKeyMaterialDoesNotExpire is a ExpirationModelType enum value - ExpirationModelTypeKeyMaterialDoesNotExpire = "KEY_MATERIAL_DOES_NOT_EXPIRE" -) - -// ExpirationModelType_Values returns all elements of the ExpirationModelType enum -func ExpirationModelType_Values() []string { - return []string{ - ExpirationModelTypeKeyMaterialExpires, - ExpirationModelTypeKeyMaterialDoesNotExpire, - } -} - -const ( - // GrantOperationDecrypt is a GrantOperation enum value - GrantOperationDecrypt = "Decrypt" - - // GrantOperationEncrypt is a GrantOperation enum value - GrantOperationEncrypt = "Encrypt" - - // GrantOperationGenerateDataKey is a GrantOperation enum value - GrantOperationGenerateDataKey = "GenerateDataKey" - - // GrantOperationGenerateDataKeyWithoutPlaintext is a GrantOperation enum value - GrantOperationGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" - - // GrantOperationReEncryptFrom is a GrantOperation enum value - GrantOperationReEncryptFrom = "ReEncryptFrom" - - // GrantOperationReEncryptTo is a GrantOperation enum value - GrantOperationReEncryptTo = "ReEncryptTo" - - // GrantOperationSign is a GrantOperation enum value - GrantOperationSign = "Sign" - - // GrantOperationVerify is a GrantOperation enum value - GrantOperationVerify = "Verify" - - // GrantOperationGetPublicKey is a GrantOperation enum value - GrantOperationGetPublicKey = "GetPublicKey" - - // GrantOperationCreateGrant is a GrantOperation enum value - GrantOperationCreateGrant = "CreateGrant" - - // GrantOperationRetireGrant is a GrantOperation enum value - GrantOperationRetireGrant = "RetireGrant" - - // GrantOperationDescribeKey is a GrantOperation enum value - GrantOperationDescribeKey = "DescribeKey" - - // GrantOperationGenerateDataKeyPair is a GrantOperation enum value - GrantOperationGenerateDataKeyPair = "GenerateDataKeyPair" - - // GrantOperationGenerateDataKeyPairWithoutPlaintext is a GrantOperation enum value - GrantOperationGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" -) - -// GrantOperation_Values returns all elements of the GrantOperation enum -func GrantOperation_Values() []string { - return []string{ - GrantOperationDecrypt, - GrantOperationEncrypt, - GrantOperationGenerateDataKey, - GrantOperationGenerateDataKeyWithoutPlaintext, - GrantOperationReEncryptFrom, - GrantOperationReEncryptTo, - GrantOperationSign, - GrantOperationVerify, - GrantOperationGetPublicKey, - GrantOperationCreateGrant, - GrantOperationRetireGrant, - GrantOperationDescribeKey, - GrantOperationGenerateDataKeyPair, - GrantOperationGenerateDataKeyPairWithoutPlaintext, - } -} - -const ( - // KeyManagerTypeAws is a KeyManagerType enum value - KeyManagerTypeAws = "AWS" - - // KeyManagerTypeCustomer is a KeyManagerType enum value - 
KeyManagerTypeCustomer = "CUSTOMER" -) - -// KeyManagerType_Values returns all elements of the KeyManagerType enum -func KeyManagerType_Values() []string { - return []string{ - KeyManagerTypeAws, - KeyManagerTypeCustomer, - } -} - -const ( - // KeySpecRsa2048 is a KeySpec enum value - KeySpecRsa2048 = "RSA_2048" - - // KeySpecRsa3072 is a KeySpec enum value - KeySpecRsa3072 = "RSA_3072" - - // KeySpecRsa4096 is a KeySpec enum value - KeySpecRsa4096 = "RSA_4096" - - // KeySpecEccNistP256 is a KeySpec enum value - KeySpecEccNistP256 = "ECC_NIST_P256" - - // KeySpecEccNistP384 is a KeySpec enum value - KeySpecEccNistP384 = "ECC_NIST_P384" - - // KeySpecEccNistP521 is a KeySpec enum value - KeySpecEccNistP521 = "ECC_NIST_P521" - - // KeySpecEccSecgP256k1 is a KeySpec enum value - KeySpecEccSecgP256k1 = "ECC_SECG_P256K1" - - // KeySpecSymmetricDefault is a KeySpec enum value - KeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" -) - -// KeySpec_Values returns all elements of the KeySpec enum -func KeySpec_Values() []string { - return []string{ - KeySpecRsa2048, - KeySpecRsa3072, - KeySpecRsa4096, - KeySpecEccNistP256, - KeySpecEccNistP384, - KeySpecEccNistP521, - KeySpecEccSecgP256k1, - KeySpecSymmetricDefault, - } -} - -const ( - // KeyStateCreating is a KeyState enum value - KeyStateCreating = "Creating" - - // KeyStateEnabled is a KeyState enum value - KeyStateEnabled = "Enabled" - - // KeyStateDisabled is a KeyState enum value - KeyStateDisabled = "Disabled" - - // KeyStatePendingDeletion is a KeyState enum value - KeyStatePendingDeletion = "PendingDeletion" - - // KeyStatePendingImport is a KeyState enum value - KeyStatePendingImport = "PendingImport" - - // KeyStatePendingReplicaDeletion is a KeyState enum value - KeyStatePendingReplicaDeletion = "PendingReplicaDeletion" - - // KeyStateUnavailable is a KeyState enum value - KeyStateUnavailable = "Unavailable" - - // KeyStateUpdating is a KeyState enum value - KeyStateUpdating = "Updating" -) - -// KeyState_Values returns all elements of the KeyState enum -func KeyState_Values() []string { - return []string{ - KeyStateCreating, - KeyStateEnabled, - KeyStateDisabled, - KeyStatePendingDeletion, - KeyStatePendingImport, - KeyStatePendingReplicaDeletion, - KeyStateUnavailable, - KeyStateUpdating, - } -} - -const ( - // KeyUsageTypeSignVerify is a KeyUsageType enum value - KeyUsageTypeSignVerify = "SIGN_VERIFY" - - // KeyUsageTypeEncryptDecrypt is a KeyUsageType enum value - KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" -) - -// KeyUsageType_Values returns all elements of the KeyUsageType enum -func KeyUsageType_Values() []string { - return []string{ - KeyUsageTypeSignVerify, - KeyUsageTypeEncryptDecrypt, - } -} - -const ( - // MessageTypeRaw is a MessageType enum value - MessageTypeRaw = "RAW" - - // MessageTypeDigest is a MessageType enum value - MessageTypeDigest = "DIGEST" -) - -// MessageType_Values returns all elements of the MessageType enum -func MessageType_Values() []string { - return []string{ - MessageTypeRaw, - MessageTypeDigest, - } -} - -const ( - // MultiRegionKeyTypePrimary is a MultiRegionKeyType enum value - MultiRegionKeyTypePrimary = "PRIMARY" - - // MultiRegionKeyTypeReplica is a MultiRegionKeyType enum value - MultiRegionKeyTypeReplica = "REPLICA" -) - -// MultiRegionKeyType_Values returns all elements of the MultiRegionKeyType enum -func MultiRegionKeyType_Values() []string { - return []string{ - MultiRegionKeyTypePrimary, - MultiRegionKeyTypeReplica, - } -} - -const ( - // OriginTypeAwsKms is a OriginType enum value - 
OriginTypeAwsKms = "AWS_KMS" - - // OriginTypeExternal is a OriginType enum value - OriginTypeExternal = "EXTERNAL" - - // OriginTypeAwsCloudhsm is a OriginType enum value - OriginTypeAwsCloudhsm = "AWS_CLOUDHSM" -) - -// OriginType_Values returns all elements of the OriginType enum -func OriginType_Values() []string { - return []string{ - OriginTypeAwsKms, - OriginTypeExternal, - OriginTypeAwsCloudhsm, - } -} - -const ( - // SigningAlgorithmSpecRsassaPssSha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha256 = "RSASSA_PSS_SHA_256" - - // SigningAlgorithmSpecRsassaPssSha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha384 = "RSASSA_PSS_SHA_384" - - // SigningAlgorithmSpecRsassaPssSha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha512 = "RSASSA_PSS_SHA_512" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha256 = "RSASSA_PKCS1_V1_5_SHA_256" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha384 = "RSASSA_PKCS1_V1_5_SHA_384" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha512 = "RSASSA_PKCS1_V1_5_SHA_512" - - // SigningAlgorithmSpecEcdsaSha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha256 = "ECDSA_SHA_256" - - // SigningAlgorithmSpecEcdsaSha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha384 = "ECDSA_SHA_384" - - // SigningAlgorithmSpecEcdsaSha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha512 = "ECDSA_SHA_512" -) - -// SigningAlgorithmSpec_Values returns all elements of the SigningAlgorithmSpec enum -func SigningAlgorithmSpec_Values() []string { - return []string{ - SigningAlgorithmSpecRsassaPssSha256, - SigningAlgorithmSpecRsassaPssSha384, - SigningAlgorithmSpecRsassaPssSha512, - SigningAlgorithmSpecRsassaPkcs1V15Sha256, - SigningAlgorithmSpecRsassaPkcs1V15Sha384, - SigningAlgorithmSpecRsassaPkcs1V15Sha512, - SigningAlgorithmSpecEcdsaSha256, - SigningAlgorithmSpecEcdsaSha384, - SigningAlgorithmSpecEcdsaSha512, - } -} - -const ( - // WrappingKeySpecRsa2048 is a WrappingKeySpec enum value - WrappingKeySpecRsa2048 = "RSA_2048" -) - -// WrappingKeySpec_Values returns all elements of the WrappingKeySpec enum -func WrappingKeySpec_Values() []string { - return []string{ - WrappingKeySpecRsa2048, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go deleted file mode 100644 index 64050c225..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package kms provides the client and types for making API -// requests to AWS Key Management Service. -// -// Key Management Service (KMS) is an encryption and key management web service. -// This guide describes the KMS operations that you can call programmatically. -// For general information about KMS, see the Key Management Service Developer -// Guide (https://docs.aws.amazon.com/kms/latest/developerguide/). -// -// KMS is replacing the term customer master key (CMK) with KMS key and KMS -// key. The concept has not changed. To prevent breaking changes, KMS is keeping -// some variations of this term. 
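A minimal sketch of how the generated `_Values()` enum helpers removed above were typically used by callers, assuming only the old `github.com/aws/aws-sdk-go/service/kms` import path that this patch drops from the vendor tree; the `isSupportedSigningAlgorithm` function is an illustrative name, not SDK API:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/kms"
)

// isSupportedSigningAlgorithm checks a candidate string against the full
// SigningAlgorithmSpec enum returned by the generated _Values() helper.
func isSupportedSigningAlgorithm(algo string) bool {
	for _, v := range kms.SigningAlgorithmSpec_Values() {
		if v == algo {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isSupportedSigningAlgorithm(kms.SigningAlgorithmSpecEcdsaSha256)) // true
	fmt.Println(isSupportedSigningAlgorithm("ECDSA_SHA_1"))                       // false
}
```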
-// -// Amazon Web Services provides SDKs that consist of libraries and sample code -// for various programming languages and platforms (Java, Ruby, .Net, macOS, -// Android, etc.). The SDKs provide a convenient way to create programmatic -// access to KMS and other Amazon Web Services services. For example, the SDKs -// take care of tasks such as signing requests (see below), managing errors, -// and retrying requests automatically. For more information about the Amazon -// Web Services SDKs, including how to download and install them, see Tools -// for Amazon Web Services (http://aws.amazon.com/tools/). -// -// We recommend that you use the Amazon Web Services SDKs to make programmatic -// API calls to KMS. -// -// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS -// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy -// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral -// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support -// these modes. -// -// Signing Requests -// -// Requests must be signed by using an access key ID and a secret access key. -// We strongly recommend that you do not use your Amazon Web Services account -// (root) access key ID and secret key for everyday work with KMS. Instead, -// use the access key ID and secret access key for an IAM user. You can also -// use the Amazon Web Services Security Token Service to generate temporary -// security credentials that you can use to sign requests. -// -// All KMS operations require Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// -// Logging API Requests -// -// KMS supports CloudTrail, a service that logs Amazon Web Services API calls -// and related events for your Amazon Web Services account and delivers them -// to an Amazon S3 bucket that you specify. By using the information collected -// by CloudTrail, you can determine what requests were made to KMS, who made -// the request, when it was made, and so on. To learn more about CloudTrail, -// including how to turn it on and find your log files, see the CloudTrail User -// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/). -// -// Additional Resources -// -// For more information about credentials and request signing, see the following: -// -// * Amazon Web Services Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) -// - This topic provides general information about the types of credentials -// used to access Amazon Web Services. -// -// * Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// - This section of the IAM User Guide describes how to create and use temporary -// security credentials. -// -// * Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) -// - This set of topics walks you through the process of signing a request -// using an access key ID and a secret access key. -// -// Commonly Used API Operations -// -// Of the API operations discussed in this guide, the following will prove the -// most useful for most applications. You will likely perform operations other -// than these, such as creating keys and assigning policies, by using the console. 
-// -// * Encrypt -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext -// -// See https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01 for more information on this service. -// -// See kms package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/ -// -// Using the Client -// -// To contact AWS Key Management Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Key Management Service client KMS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/#New -package kms diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go deleted file mode 100644 index 7f5a1f0ba..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go +++ /dev/null @@ -1,366 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package kms - -import ( - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - - // ErrCodeAlreadyExistsException for service response error code - // "AlreadyExistsException". - // - // The request was rejected because it attempted to create a resource that already - // exists. - ErrCodeAlreadyExistsException = "AlreadyExistsException" - - // ErrCodeCloudHsmClusterInUseException for service response error code - // "CloudHsmClusterInUseException". - // - // The request was rejected because the specified CloudHSM cluster is already - // associated with a custom key store or it shares a backup history with a cluster - // that is associated with a custom key store. Each custom key store must be - // associated with a different CloudHSM cluster. - // - // Clusters that share a backup history have the same cluster certificate. To - // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - ErrCodeCloudHsmClusterInUseException = "CloudHsmClusterInUseException" - - // ErrCodeCloudHsmClusterInvalidConfigurationException for service response error code - // "CloudHsmClusterInvalidConfigurationException". - // - // The request was rejected because the associated CloudHSM cluster did not - // meet the configuration requirements for a custom key store. - // - // * The cluster must be configured with private subnets in at least two - // different Availability Zones in the Region. - // - // * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // (cloudhsm-cluster--sg) must include inbound rules and outbound - // rules that allow TCP traffic on ports 2223-2225. The Source in the inbound - // rules and the Destination in the outbound rules must match the security - // group ID. These rules are set by default when you create the cluster. - // Do not delete or change them. To get information about a particular security - // group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) - // operation. 
- // - // * The cluster must contain at least as many HSMs as the operation requires. - // To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) - // operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey - // operations, the CloudHSM cluster must have at least two active HSMs, each - // in a different Availability Zone. For the ConnectCustomKeyStore operation, - // the CloudHSM must contain at least one active HSM. - // - // For information about the requirements for an CloudHSM cluster that is associated - // with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) - // in the Key Management Service Developer Guide. For information about creating - // a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) - // in the CloudHSM User Guide. For information about cluster security groups, - // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // in the CloudHSM User Guide . - ErrCodeCloudHsmClusterInvalidConfigurationException = "CloudHsmClusterInvalidConfigurationException" - - // ErrCodeCloudHsmClusterNotActiveException for service response error code - // "CloudHsmClusterNotActiveException". - // - // The request was rejected because the CloudHSM cluster that is associated - // with the custom key store is not active. Initialize and activate the cluster - // and try the command again. For detailed instructions, see Getting Started - // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) - // in the CloudHSM User Guide. - ErrCodeCloudHsmClusterNotActiveException = "CloudHsmClusterNotActiveException" - - // ErrCodeCloudHsmClusterNotFoundException for service response error code - // "CloudHsmClusterNotFoundException". - // - // The request was rejected because KMS cannot find the CloudHSM cluster with - // the specified cluster ID. Retry the request with a different cluster ID. - ErrCodeCloudHsmClusterNotFoundException = "CloudHsmClusterNotFoundException" - - // ErrCodeCloudHsmClusterNotRelatedException for service response error code - // "CloudHsmClusterNotRelatedException". - // - // The request was rejected because the specified CloudHSM cluster has a different - // cluster certificate than the original cluster. You cannot use the operation - // to specify an unrelated cluster. - // - // Specify a cluster that shares a backup history with the original cluster. - // This includes clusters that were created from a backup of the current cluster, - // and clusters that were created from the same backup that produced the current - // cluster. - // - // Clusters that share a backup history have the same cluster certificate. To - // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - ErrCodeCloudHsmClusterNotRelatedException = "CloudHsmClusterNotRelatedException" - - // ErrCodeCustomKeyStoreHasCMKsException for service response error code - // "CustomKeyStoreHasCMKsException". - // - // The request was rejected because the custom key store contains KMS keys. - // After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion - // operation to delete the KMS keys. 
After they are deleted, you can delete - // the custom key store. - ErrCodeCustomKeyStoreHasCMKsException = "CustomKeyStoreHasCMKsException" - - // ErrCodeCustomKeyStoreInvalidStateException for service response error code - // "CustomKeyStoreInvalidStateException". - // - // The request was rejected because of the ConnectionState of the custom key - // store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores - // operation. - // - // This exception is thrown under the following conditions: - // - // * You requested the CreateKey or GenerateRandom operation in a custom - // key store that is not connected. These operations are valid only when - // the custom key store ConnectionState is CONNECTED. - // - // * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation - // on a custom key store that is not disconnected. This operation is valid - // only when the custom key store ConnectionState is DISCONNECTED. - // - // * You requested the ConnectCustomKeyStore operation on a custom key store - // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid - // for all other ConnectionState values. - ErrCodeCustomKeyStoreInvalidStateException = "CustomKeyStoreInvalidStateException" - - // ErrCodeCustomKeyStoreNameInUseException for service response error code - // "CustomKeyStoreNameInUseException". - // - // The request was rejected because the specified custom key store name is already - // assigned to another custom key store in the account. Try again with a custom - // key store name that is unique in the account. - ErrCodeCustomKeyStoreNameInUseException = "CustomKeyStoreNameInUseException" - - // ErrCodeCustomKeyStoreNotFoundException for service response error code - // "CustomKeyStoreNotFoundException". - // - // The request was rejected because KMS cannot find a custom key store with - // the specified key store name or ID. - ErrCodeCustomKeyStoreNotFoundException = "CustomKeyStoreNotFoundException" - - // ErrCodeDependencyTimeoutException for service response error code - // "DependencyTimeoutException". - // - // The system timed out while trying to fulfill the request. The request can - // be retried. - ErrCodeDependencyTimeoutException = "DependencyTimeoutException" - - // ErrCodeDisabledException for service response error code - // "DisabledException". - // - // The request was rejected because the specified KMS key is not enabled. - ErrCodeDisabledException = "DisabledException" - - // ErrCodeExpiredImportTokenException for service response error code - // "ExpiredImportTokenException". - // - // The request was rejected because the specified import token is expired. Use - // GetParametersForImport to get a new import token and public key, use the - // new public key to encrypt the key material, and then try the request again. - ErrCodeExpiredImportTokenException = "ExpiredImportTokenException" - - // ErrCodeIncorrectKeyException for service response error code - // "IncorrectKeyException". - // - // The request was rejected because the specified KMS key cannot decrypt the - // data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request - // must identify the same KMS key that was used to encrypt the ciphertext. - ErrCodeIncorrectKeyException = "IncorrectKeyException" - - // ErrCodeIncorrectKeyMaterialException for service response error code - // "IncorrectKeyMaterialException". 
- // - // The request was rejected because the key material in the request is, expired, - // invalid, or is not the same key material that was previously imported into - // this KMS key. - ErrCodeIncorrectKeyMaterialException = "IncorrectKeyMaterialException" - - // ErrCodeIncorrectTrustAnchorException for service response error code - // "IncorrectTrustAnchorException". - // - // The request was rejected because the trust anchor certificate in the request - // is not the trust anchor certificate for the specified CloudHSM cluster. - // - // When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), - // you create the trust anchor certificate and save it in the customerCA.crt - // file. - ErrCodeIncorrectTrustAnchorException = "IncorrectTrustAnchorException" - - // ErrCodeInternalException for service response error code - // "KMSInternalException". - // - // The request was rejected because an internal exception occurred. The request - // can be retried. - ErrCodeInternalException = "KMSInternalException" - - // ErrCodeInvalidAliasNameException for service response error code - // "InvalidAliasNameException". - // - // The request was rejected because the specified alias name is not valid. - ErrCodeInvalidAliasNameException = "InvalidAliasNameException" - - // ErrCodeInvalidArnException for service response error code - // "InvalidArnException". - // - // The request was rejected because a specified ARN, or an ARN in a key policy, - // is not valid. - ErrCodeInvalidArnException = "InvalidArnException" - - // ErrCodeInvalidCiphertextException for service response error code - // "InvalidCiphertextException". - // - // From the Decrypt or ReEncrypt operation, the request was rejected because - // the specified ciphertext, or additional authenticated data incorporated into - // the ciphertext, such as the encryption context, is corrupted, missing, or - // otherwise invalid. - // - // From the ImportKeyMaterial operation, the request was rejected because KMS - // could not decrypt the encrypted (wrapped) key material. - ErrCodeInvalidCiphertextException = "InvalidCiphertextException" - - // ErrCodeInvalidGrantIdException for service response error code - // "InvalidGrantIdException". - // - // The request was rejected because the specified GrantId is not valid. - ErrCodeInvalidGrantIdException = "InvalidGrantIdException" - - // ErrCodeInvalidGrantTokenException for service response error code - // "InvalidGrantTokenException". - // - // The request was rejected because the specified grant token is not valid. - ErrCodeInvalidGrantTokenException = "InvalidGrantTokenException" - - // ErrCodeInvalidImportTokenException for service response error code - // "InvalidImportTokenException". - // - // The request was rejected because the provided import token is invalid or - // is associated with a different KMS key. - ErrCodeInvalidImportTokenException = "InvalidImportTokenException" - - // ErrCodeInvalidKeyUsageException for service response error code - // "InvalidKeyUsageException". - // - // The request was rejected for one of the following reasons: - // - // * The KeyUsage value of the KMS key is incompatible with the API operation. - // - // * The encryption algorithm or signing algorithm specified for the operation - // is incompatible with the type of key material in the KMS key (KeySpec). - // - // For encrypting, decrypting, re-encrypting, and generating data keys, the - // KeyUsage must be ENCRYPT_DECRYPT. 
For signing and verifying, the KeyUsage - // must be SIGN_VERIFY. To find the KeyUsage of a KMS key, use the DescribeKey - // operation. - // - // To find the encryption or signing algorithms supported for a particular KMS - // key, use the DescribeKey operation. - ErrCodeInvalidKeyUsageException = "InvalidKeyUsageException" - - // ErrCodeInvalidMarkerException for service response error code - // "InvalidMarkerException". - // - // The request was rejected because the marker that specifies where pagination - // should next begin is not valid. - ErrCodeInvalidMarkerException = "InvalidMarkerException" - - // ErrCodeInvalidStateException for service response error code - // "KMSInvalidStateException". - // - // The request was rejected because the state of the specified resource is not - // valid for this request. - // - // For more information about how key state affects the use of a KMS key, see - // Key state: Effect on your KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide . - ErrCodeInvalidStateException = "KMSInvalidStateException" - - // ErrCodeKMSInvalidSignatureException for service response error code - // "KMSInvalidSignatureException". - // - // The request was rejected because the signature verification failed. Signature - // verification fails when it cannot confirm that signature was produced by - // signing the specified message with the specified KMS key and signing algorithm. - ErrCodeKMSInvalidSignatureException = "KMSInvalidSignatureException" - - // ErrCodeKeyUnavailableException for service response error code - // "KeyUnavailableException". - // - // The request was rejected because the specified KMS key was not available. - // You can retry the request. - ErrCodeKeyUnavailableException = "KeyUnavailableException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // The request was rejected because a quota was exceeded. For more information, - // see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) - // in the Key Management Service Developer Guide. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocumentException". - // - // The request was rejected because the specified policy is not syntactically - // or semantically correct. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocumentException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // The request was rejected because the specified entity or resource could not - // be found. - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeTagException for service response error code - // "TagException". - // - // The request was rejected because one or more tags are not valid. - ErrCodeTagException = "TagException" - - // ErrCodeUnsupportedOperationException for service response error code - // "UnsupportedOperationException". - // - // The request was rejected because a specified parameter is not supported or - // a specified resource is not valid for this operation. 
- ErrCodeUnsupportedOperationException = "UnsupportedOperationException" -) - -var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "AlreadyExistsException": newErrorAlreadyExistsException, - "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException, - "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException, - "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException, - "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException, - "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException, - "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException, - "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException, - "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException, - "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException, - "DependencyTimeoutException": newErrorDependencyTimeoutException, - "DisabledException": newErrorDisabledException, - "ExpiredImportTokenException": newErrorExpiredImportTokenException, - "IncorrectKeyException": newErrorIncorrectKeyException, - "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException, - "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException, - "KMSInternalException": newErrorInternalException, - "InvalidAliasNameException": newErrorInvalidAliasNameException, - "InvalidArnException": newErrorInvalidArnException, - "InvalidCiphertextException": newErrorInvalidCiphertextException, - "InvalidGrantIdException": newErrorInvalidGrantIdException, - "InvalidGrantTokenException": newErrorInvalidGrantTokenException, - "InvalidImportTokenException": newErrorInvalidImportTokenException, - "InvalidKeyUsageException": newErrorInvalidKeyUsageException, - "InvalidMarkerException": newErrorInvalidMarkerException, - "KMSInvalidStateException": newErrorInvalidStateException, - "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException, - "KeyUnavailableException": newErrorKeyUnavailableException, - "LimitExceededException": newErrorLimitExceededException, - "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException, - "NotFoundException": newErrorNotFoundException, - "TagException": newErrorTagException, - "UnsupportedOperationException": newErrorUnsupportedOperationException, -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go deleted file mode 100644 index 6de9505a5..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/interface.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package kmsiface provides an interface to enable mocking the AWS Key Management Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package kmsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/kms" -) - -// KMSAPI provides an interface to enable mocking the -// kms.KMS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. 
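The doc comment that continues below describes stubbing the KMSAPI interface for unit tests. A compact, self-contained version of that pattern, again assuming the v1 packages being deleted here; `mockKMSClient` and `verifyWithKMS` are illustrative names, not part of the SDK:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
	"github.com/aws/aws-sdk-go/service/kms/kmsiface"
)

// mockKMSClient embeds the interface, so only the methods a test exercises
// need real implementations; any other method panics if called unexpectedly.
type mockKMSClient struct {
	kmsiface.KMSAPI
}

func (m *mockKMSClient) Verify(in *kms.VerifyInput) (*kms.VerifyOutput, error) {
	return &kms.VerifyOutput{SignatureValid: aws.Bool(true)}, nil
}

// verifyWithKMS takes the interface, so tests can pass the mock while
// production code passes a real *kms.KMS client.
func verifyWithKMS(svc kmsiface.KMSAPI, in *kms.VerifyInput) (bool, error) {
	out, err := svc.Verify(in)
	if err != nil {
		return false, err
	}
	return aws.BoolValue(out.SignatureValid), nil
}

func main() {
	ok, _ := verifyWithKMS(&mockKMSClient{}, &kms.VerifyInput{})
	fmt.Println(ok) // true
}
```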
-// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Key Management Service. -// func myFunc(svc kmsiface.KMSAPI) bool { -// // Make svc.CancelKeyDeletion request -// } -// -// func main() { -// sess := session.New() -// svc := kms.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockKMSClient struct { -// kmsiface.KMSAPI -// } -// func (m *mockKMSClient) CancelKeyDeletion(input *kms.CancelKeyDeletionInput) (*kms.CancelKeyDeletionOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockKMSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type KMSAPI interface { - CancelKeyDeletion(*kms.CancelKeyDeletionInput) (*kms.CancelKeyDeletionOutput, error) - CancelKeyDeletionWithContext(aws.Context, *kms.CancelKeyDeletionInput, ...request.Option) (*kms.CancelKeyDeletionOutput, error) - CancelKeyDeletionRequest(*kms.CancelKeyDeletionInput) (*request.Request, *kms.CancelKeyDeletionOutput) - - ConnectCustomKeyStore(*kms.ConnectCustomKeyStoreInput) (*kms.ConnectCustomKeyStoreOutput, error) - ConnectCustomKeyStoreWithContext(aws.Context, *kms.ConnectCustomKeyStoreInput, ...request.Option) (*kms.ConnectCustomKeyStoreOutput, error) - ConnectCustomKeyStoreRequest(*kms.ConnectCustomKeyStoreInput) (*request.Request, *kms.ConnectCustomKeyStoreOutput) - - CreateAlias(*kms.CreateAliasInput) (*kms.CreateAliasOutput, error) - CreateAliasWithContext(aws.Context, *kms.CreateAliasInput, ...request.Option) (*kms.CreateAliasOutput, error) - CreateAliasRequest(*kms.CreateAliasInput) (*request.Request, *kms.CreateAliasOutput) - - CreateCustomKeyStore(*kms.CreateCustomKeyStoreInput) (*kms.CreateCustomKeyStoreOutput, error) - CreateCustomKeyStoreWithContext(aws.Context, *kms.CreateCustomKeyStoreInput, ...request.Option) (*kms.CreateCustomKeyStoreOutput, error) - CreateCustomKeyStoreRequest(*kms.CreateCustomKeyStoreInput) (*request.Request, *kms.CreateCustomKeyStoreOutput) - - CreateGrant(*kms.CreateGrantInput) (*kms.CreateGrantOutput, error) - CreateGrantWithContext(aws.Context, *kms.CreateGrantInput, ...request.Option) (*kms.CreateGrantOutput, error) - CreateGrantRequest(*kms.CreateGrantInput) (*request.Request, *kms.CreateGrantOutput) - - CreateKey(*kms.CreateKeyInput) (*kms.CreateKeyOutput, error) - CreateKeyWithContext(aws.Context, *kms.CreateKeyInput, ...request.Option) (*kms.CreateKeyOutput, error) - CreateKeyRequest(*kms.CreateKeyInput) (*request.Request, *kms.CreateKeyOutput) - - Decrypt(*kms.DecryptInput) (*kms.DecryptOutput, error) - DecryptWithContext(aws.Context, *kms.DecryptInput, ...request.Option) (*kms.DecryptOutput, error) - DecryptRequest(*kms.DecryptInput) (*request.Request, *kms.DecryptOutput) - - DeleteAlias(*kms.DeleteAliasInput) (*kms.DeleteAliasOutput, error) - DeleteAliasWithContext(aws.Context, *kms.DeleteAliasInput, ...request.Option) 
(*kms.DeleteAliasOutput, error) - DeleteAliasRequest(*kms.DeleteAliasInput) (*request.Request, *kms.DeleteAliasOutput) - - DeleteCustomKeyStore(*kms.DeleteCustomKeyStoreInput) (*kms.DeleteCustomKeyStoreOutput, error) - DeleteCustomKeyStoreWithContext(aws.Context, *kms.DeleteCustomKeyStoreInput, ...request.Option) (*kms.DeleteCustomKeyStoreOutput, error) - DeleteCustomKeyStoreRequest(*kms.DeleteCustomKeyStoreInput) (*request.Request, *kms.DeleteCustomKeyStoreOutput) - - DeleteImportedKeyMaterial(*kms.DeleteImportedKeyMaterialInput) (*kms.DeleteImportedKeyMaterialOutput, error) - DeleteImportedKeyMaterialWithContext(aws.Context, *kms.DeleteImportedKeyMaterialInput, ...request.Option) (*kms.DeleteImportedKeyMaterialOutput, error) - DeleteImportedKeyMaterialRequest(*kms.DeleteImportedKeyMaterialInput) (*request.Request, *kms.DeleteImportedKeyMaterialOutput) - - DescribeCustomKeyStores(*kms.DescribeCustomKeyStoresInput) (*kms.DescribeCustomKeyStoresOutput, error) - DescribeCustomKeyStoresWithContext(aws.Context, *kms.DescribeCustomKeyStoresInput, ...request.Option) (*kms.DescribeCustomKeyStoresOutput, error) - DescribeCustomKeyStoresRequest(*kms.DescribeCustomKeyStoresInput) (*request.Request, *kms.DescribeCustomKeyStoresOutput) - - DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) - DescribeKeyWithContext(aws.Context, *kms.DescribeKeyInput, ...request.Option) (*kms.DescribeKeyOutput, error) - DescribeKeyRequest(*kms.DescribeKeyInput) (*request.Request, *kms.DescribeKeyOutput) - - DisableKey(*kms.DisableKeyInput) (*kms.DisableKeyOutput, error) - DisableKeyWithContext(aws.Context, *kms.DisableKeyInput, ...request.Option) (*kms.DisableKeyOutput, error) - DisableKeyRequest(*kms.DisableKeyInput) (*request.Request, *kms.DisableKeyOutput) - - DisableKeyRotation(*kms.DisableKeyRotationInput) (*kms.DisableKeyRotationOutput, error) - DisableKeyRotationWithContext(aws.Context, *kms.DisableKeyRotationInput, ...request.Option) (*kms.DisableKeyRotationOutput, error) - DisableKeyRotationRequest(*kms.DisableKeyRotationInput) (*request.Request, *kms.DisableKeyRotationOutput) - - DisconnectCustomKeyStore(*kms.DisconnectCustomKeyStoreInput) (*kms.DisconnectCustomKeyStoreOutput, error) - DisconnectCustomKeyStoreWithContext(aws.Context, *kms.DisconnectCustomKeyStoreInput, ...request.Option) (*kms.DisconnectCustomKeyStoreOutput, error) - DisconnectCustomKeyStoreRequest(*kms.DisconnectCustomKeyStoreInput) (*request.Request, *kms.DisconnectCustomKeyStoreOutput) - - EnableKey(*kms.EnableKeyInput) (*kms.EnableKeyOutput, error) - EnableKeyWithContext(aws.Context, *kms.EnableKeyInput, ...request.Option) (*kms.EnableKeyOutput, error) - EnableKeyRequest(*kms.EnableKeyInput) (*request.Request, *kms.EnableKeyOutput) - - EnableKeyRotation(*kms.EnableKeyRotationInput) (*kms.EnableKeyRotationOutput, error) - EnableKeyRotationWithContext(aws.Context, *kms.EnableKeyRotationInput, ...request.Option) (*kms.EnableKeyRotationOutput, error) - EnableKeyRotationRequest(*kms.EnableKeyRotationInput) (*request.Request, *kms.EnableKeyRotationOutput) - - Encrypt(*kms.EncryptInput) (*kms.EncryptOutput, error) - EncryptWithContext(aws.Context, *kms.EncryptInput, ...request.Option) (*kms.EncryptOutput, error) - EncryptRequest(*kms.EncryptInput) (*request.Request, *kms.EncryptOutput) - - GenerateDataKey(*kms.GenerateDataKeyInput) (*kms.GenerateDataKeyOutput, error) - GenerateDataKeyWithContext(aws.Context, *kms.GenerateDataKeyInput, ...request.Option) (*kms.GenerateDataKeyOutput, error) - 
GenerateDataKeyRequest(*kms.GenerateDataKeyInput) (*request.Request, *kms.GenerateDataKeyOutput) - - GenerateDataKeyPair(*kms.GenerateDataKeyPairInput) (*kms.GenerateDataKeyPairOutput, error) - GenerateDataKeyPairWithContext(aws.Context, *kms.GenerateDataKeyPairInput, ...request.Option) (*kms.GenerateDataKeyPairOutput, error) - GenerateDataKeyPairRequest(*kms.GenerateDataKeyPairInput) (*request.Request, *kms.GenerateDataKeyPairOutput) - - GenerateDataKeyPairWithoutPlaintext(*kms.GenerateDataKeyPairWithoutPlaintextInput) (*kms.GenerateDataKeyPairWithoutPlaintextOutput, error) - GenerateDataKeyPairWithoutPlaintextWithContext(aws.Context, *kms.GenerateDataKeyPairWithoutPlaintextInput, ...request.Option) (*kms.GenerateDataKeyPairWithoutPlaintextOutput, error) - GenerateDataKeyPairWithoutPlaintextRequest(*kms.GenerateDataKeyPairWithoutPlaintextInput) (*request.Request, *kms.GenerateDataKeyPairWithoutPlaintextOutput) - - GenerateDataKeyWithoutPlaintext(*kms.GenerateDataKeyWithoutPlaintextInput) (*kms.GenerateDataKeyWithoutPlaintextOutput, error) - GenerateDataKeyWithoutPlaintextWithContext(aws.Context, *kms.GenerateDataKeyWithoutPlaintextInput, ...request.Option) (*kms.GenerateDataKeyWithoutPlaintextOutput, error) - GenerateDataKeyWithoutPlaintextRequest(*kms.GenerateDataKeyWithoutPlaintextInput) (*request.Request, *kms.GenerateDataKeyWithoutPlaintextOutput) - - GenerateRandom(*kms.GenerateRandomInput) (*kms.GenerateRandomOutput, error) - GenerateRandomWithContext(aws.Context, *kms.GenerateRandomInput, ...request.Option) (*kms.GenerateRandomOutput, error) - GenerateRandomRequest(*kms.GenerateRandomInput) (*request.Request, *kms.GenerateRandomOutput) - - GetKeyPolicy(*kms.GetKeyPolicyInput) (*kms.GetKeyPolicyOutput, error) - GetKeyPolicyWithContext(aws.Context, *kms.GetKeyPolicyInput, ...request.Option) (*kms.GetKeyPolicyOutput, error) - GetKeyPolicyRequest(*kms.GetKeyPolicyInput) (*request.Request, *kms.GetKeyPolicyOutput) - - GetKeyRotationStatus(*kms.GetKeyRotationStatusInput) (*kms.GetKeyRotationStatusOutput, error) - GetKeyRotationStatusWithContext(aws.Context, *kms.GetKeyRotationStatusInput, ...request.Option) (*kms.GetKeyRotationStatusOutput, error) - GetKeyRotationStatusRequest(*kms.GetKeyRotationStatusInput) (*request.Request, *kms.GetKeyRotationStatusOutput) - - GetParametersForImport(*kms.GetParametersForImportInput) (*kms.GetParametersForImportOutput, error) - GetParametersForImportWithContext(aws.Context, *kms.GetParametersForImportInput, ...request.Option) (*kms.GetParametersForImportOutput, error) - GetParametersForImportRequest(*kms.GetParametersForImportInput) (*request.Request, *kms.GetParametersForImportOutput) - - GetPublicKey(*kms.GetPublicKeyInput) (*kms.GetPublicKeyOutput, error) - GetPublicKeyWithContext(aws.Context, *kms.GetPublicKeyInput, ...request.Option) (*kms.GetPublicKeyOutput, error) - GetPublicKeyRequest(*kms.GetPublicKeyInput) (*request.Request, *kms.GetPublicKeyOutput) - - ImportKeyMaterial(*kms.ImportKeyMaterialInput) (*kms.ImportKeyMaterialOutput, error) - ImportKeyMaterialWithContext(aws.Context, *kms.ImportKeyMaterialInput, ...request.Option) (*kms.ImportKeyMaterialOutput, error) - ImportKeyMaterialRequest(*kms.ImportKeyMaterialInput) (*request.Request, *kms.ImportKeyMaterialOutput) - - ListAliases(*kms.ListAliasesInput) (*kms.ListAliasesOutput, error) - ListAliasesWithContext(aws.Context, *kms.ListAliasesInput, ...request.Option) (*kms.ListAliasesOutput, error) - ListAliasesRequest(*kms.ListAliasesInput) (*request.Request, *kms.ListAliasesOutput) - - 
ListAliasesPages(*kms.ListAliasesInput, func(*kms.ListAliasesOutput, bool) bool) error - ListAliasesPagesWithContext(aws.Context, *kms.ListAliasesInput, func(*kms.ListAliasesOutput, bool) bool, ...request.Option) error - - ListGrants(*kms.ListGrantsInput) (*kms.ListGrantsResponse, error) - ListGrantsWithContext(aws.Context, *kms.ListGrantsInput, ...request.Option) (*kms.ListGrantsResponse, error) - ListGrantsRequest(*kms.ListGrantsInput) (*request.Request, *kms.ListGrantsResponse) - - ListGrantsPages(*kms.ListGrantsInput, func(*kms.ListGrantsResponse, bool) bool) error - ListGrantsPagesWithContext(aws.Context, *kms.ListGrantsInput, func(*kms.ListGrantsResponse, bool) bool, ...request.Option) error - - ListKeyPolicies(*kms.ListKeyPoliciesInput) (*kms.ListKeyPoliciesOutput, error) - ListKeyPoliciesWithContext(aws.Context, *kms.ListKeyPoliciesInput, ...request.Option) (*kms.ListKeyPoliciesOutput, error) - ListKeyPoliciesRequest(*kms.ListKeyPoliciesInput) (*request.Request, *kms.ListKeyPoliciesOutput) - - ListKeyPoliciesPages(*kms.ListKeyPoliciesInput, func(*kms.ListKeyPoliciesOutput, bool) bool) error - ListKeyPoliciesPagesWithContext(aws.Context, *kms.ListKeyPoliciesInput, func(*kms.ListKeyPoliciesOutput, bool) bool, ...request.Option) error - - ListKeys(*kms.ListKeysInput) (*kms.ListKeysOutput, error) - ListKeysWithContext(aws.Context, *kms.ListKeysInput, ...request.Option) (*kms.ListKeysOutput, error) - ListKeysRequest(*kms.ListKeysInput) (*request.Request, *kms.ListKeysOutput) - - ListKeysPages(*kms.ListKeysInput, func(*kms.ListKeysOutput, bool) bool) error - ListKeysPagesWithContext(aws.Context, *kms.ListKeysInput, func(*kms.ListKeysOutput, bool) bool, ...request.Option) error - - ListResourceTags(*kms.ListResourceTagsInput) (*kms.ListResourceTagsOutput, error) - ListResourceTagsWithContext(aws.Context, *kms.ListResourceTagsInput, ...request.Option) (*kms.ListResourceTagsOutput, error) - ListResourceTagsRequest(*kms.ListResourceTagsInput) (*request.Request, *kms.ListResourceTagsOutput) - - ListRetirableGrants(*kms.ListRetirableGrantsInput) (*kms.ListGrantsResponse, error) - ListRetirableGrantsWithContext(aws.Context, *kms.ListRetirableGrantsInput, ...request.Option) (*kms.ListGrantsResponse, error) - ListRetirableGrantsRequest(*kms.ListRetirableGrantsInput) (*request.Request, *kms.ListGrantsResponse) - - PutKeyPolicy(*kms.PutKeyPolicyInput) (*kms.PutKeyPolicyOutput, error) - PutKeyPolicyWithContext(aws.Context, *kms.PutKeyPolicyInput, ...request.Option) (*kms.PutKeyPolicyOutput, error) - PutKeyPolicyRequest(*kms.PutKeyPolicyInput) (*request.Request, *kms.PutKeyPolicyOutput) - - ReEncrypt(*kms.ReEncryptInput) (*kms.ReEncryptOutput, error) - ReEncryptWithContext(aws.Context, *kms.ReEncryptInput, ...request.Option) (*kms.ReEncryptOutput, error) - ReEncryptRequest(*kms.ReEncryptInput) (*request.Request, *kms.ReEncryptOutput) - - ReplicateKey(*kms.ReplicateKeyInput) (*kms.ReplicateKeyOutput, error) - ReplicateKeyWithContext(aws.Context, *kms.ReplicateKeyInput, ...request.Option) (*kms.ReplicateKeyOutput, error) - ReplicateKeyRequest(*kms.ReplicateKeyInput) (*request.Request, *kms.ReplicateKeyOutput) - - RetireGrant(*kms.RetireGrantInput) (*kms.RetireGrantOutput, error) - RetireGrantWithContext(aws.Context, *kms.RetireGrantInput, ...request.Option) (*kms.RetireGrantOutput, error) - RetireGrantRequest(*kms.RetireGrantInput) (*request.Request, *kms.RetireGrantOutput) - - RevokeGrant(*kms.RevokeGrantInput) (*kms.RevokeGrantOutput, error) - RevokeGrantWithContext(aws.Context, 
*kms.RevokeGrantInput, ...request.Option) (*kms.RevokeGrantOutput, error) - RevokeGrantRequest(*kms.RevokeGrantInput) (*request.Request, *kms.RevokeGrantOutput) - - ScheduleKeyDeletion(*kms.ScheduleKeyDeletionInput) (*kms.ScheduleKeyDeletionOutput, error) - ScheduleKeyDeletionWithContext(aws.Context, *kms.ScheduleKeyDeletionInput, ...request.Option) (*kms.ScheduleKeyDeletionOutput, error) - ScheduleKeyDeletionRequest(*kms.ScheduleKeyDeletionInput) (*request.Request, *kms.ScheduleKeyDeletionOutput) - - Sign(*kms.SignInput) (*kms.SignOutput, error) - SignWithContext(aws.Context, *kms.SignInput, ...request.Option) (*kms.SignOutput, error) - SignRequest(*kms.SignInput) (*request.Request, *kms.SignOutput) - - TagResource(*kms.TagResourceInput) (*kms.TagResourceOutput, error) - TagResourceWithContext(aws.Context, *kms.TagResourceInput, ...request.Option) (*kms.TagResourceOutput, error) - TagResourceRequest(*kms.TagResourceInput) (*request.Request, *kms.TagResourceOutput) - - UntagResource(*kms.UntagResourceInput) (*kms.UntagResourceOutput, error) - UntagResourceWithContext(aws.Context, *kms.UntagResourceInput, ...request.Option) (*kms.UntagResourceOutput, error) - UntagResourceRequest(*kms.UntagResourceInput) (*request.Request, *kms.UntagResourceOutput) - - UpdateAlias(*kms.UpdateAliasInput) (*kms.UpdateAliasOutput, error) - UpdateAliasWithContext(aws.Context, *kms.UpdateAliasInput, ...request.Option) (*kms.UpdateAliasOutput, error) - UpdateAliasRequest(*kms.UpdateAliasInput) (*request.Request, *kms.UpdateAliasOutput) - - UpdateCustomKeyStore(*kms.UpdateCustomKeyStoreInput) (*kms.UpdateCustomKeyStoreOutput, error) - UpdateCustomKeyStoreWithContext(aws.Context, *kms.UpdateCustomKeyStoreInput, ...request.Option) (*kms.UpdateCustomKeyStoreOutput, error) - UpdateCustomKeyStoreRequest(*kms.UpdateCustomKeyStoreInput) (*request.Request, *kms.UpdateCustomKeyStoreOutput) - - UpdateKeyDescription(*kms.UpdateKeyDescriptionInput) (*kms.UpdateKeyDescriptionOutput, error) - UpdateKeyDescriptionWithContext(aws.Context, *kms.UpdateKeyDescriptionInput, ...request.Option) (*kms.UpdateKeyDescriptionOutput, error) - UpdateKeyDescriptionRequest(*kms.UpdateKeyDescriptionInput) (*request.Request, *kms.UpdateKeyDescriptionOutput) - - UpdatePrimaryRegion(*kms.UpdatePrimaryRegionInput) (*kms.UpdatePrimaryRegionOutput, error) - UpdatePrimaryRegionWithContext(aws.Context, *kms.UpdatePrimaryRegionInput, ...request.Option) (*kms.UpdatePrimaryRegionOutput, error) - UpdatePrimaryRegionRequest(*kms.UpdatePrimaryRegionInput) (*request.Request, *kms.UpdatePrimaryRegionOutput) - - Verify(*kms.VerifyInput) (*kms.VerifyOutput, error) - VerifyWithContext(aws.Context, *kms.VerifyInput, ...request.Option) (*kms.VerifyOutput, error) - VerifyRequest(*kms.VerifyInput) (*request.Request, *kms.VerifyOutput) -} - -var _ KMSAPI = (*kms.KMS)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go deleted file mode 100644 index 9492c2af3..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package kms - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// KMS provides the API operation methods for making requests to -// AWS Key Management Service. See this package's package overview docs -// for details on the service. -// -// KMS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type KMS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "kms" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "KMS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the KMS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a KMS client from just a session. -// svc := kms.New(mySession) -// -// // Create a KMS client with additional configuration -// svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *KMS { - svc := &KMS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2014-11-01", - ResolvedRegion: resolvedRegion, - JSONVersion: "1.1", - TargetPrefix: "TrentService", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed( - protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), - ) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a KMS operation and runs any -// custom request initialization. 
-func (c *KMS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go deleted file mode 100644 index 948f060ca..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ /dev/null @@ -1,1354 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sso - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -const opGetRoleCredentials = "GetRoleCredentials" - -// GetRoleCredentialsRequest generates a "aws/request.Request" representing the -// client's request for the GetRoleCredentials operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetRoleCredentials for more information on using the GetRoleCredentials -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetRoleCredentialsRequest method. -// req, resp := client.GetRoleCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials -func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { - op := &request.Operation{ - Name: opGetRoleCredentials, - HTTPMethod: "GET", - HTTPPath: "/federation/credentials", - } - - if input == nil { - input = &GetRoleCredentialsInput{} - } - - output = &GetRoleCredentialsOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// GetRoleCredentials API operation for AWS Single Sign-On. -// -// Returns the STS short-term credentials for a given role name that is assigned -// to the user. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation GetRoleCredentials for usage and error information. -// -// Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// * ResourceNotFoundException -// The specified resource doesn't exist. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials -func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { - req, out := c.GetRoleCredentialsRequest(input) - return out, req.Send() -} - -// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of -// the ability to pass a context and additional request options. -// -// See GetRoleCredentials for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { - req, out := c.GetRoleCredentialsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListAccountRoles = "ListAccountRoles" - -// ListAccountRolesRequest generates a "aws/request.Request" representing the -// client's request for the ListAccountRoles operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAccountRoles for more information on using the ListAccountRoles -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAccountRolesRequest method. -// req, resp := client.ListAccountRolesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles -func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { - op := &request.Operation{ - Name: opListAccountRoles, - HTTPMethod: "GET", - HTTPPath: "/assignment/roles", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListAccountRolesInput{} - } - - output = &ListAccountRolesOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// ListAccountRoles API operation for AWS Single Sign-On. -// -// Lists all roles that are assigned to the user for a given AWS account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation ListAccountRoles for usage and error information. -// -// Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// * UnauthorizedException -// Indicates that the request is not authorized. 
This can happen due to an invalid -// access token in the request. -// -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// * ResourceNotFoundException -// The specified resource doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles -func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { - req, out := c.ListAccountRolesRequest(input) - return out, req.Send() -} - -// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of -// the ability to pass a context and additional request options. -// -// See ListAccountRoles for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { - req, out := c.ListAccountRolesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListAccountRoles method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAccountRoles operation. -// pageNum := 0 -// err := client.ListAccountRolesPages(params, -// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { - return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAccountRolesPagesWithContext same as ListAccountRolesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAccountRolesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAccountRolesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListAccounts = "ListAccounts" - -// ListAccountsRequest generates a "aws/request.Request" representing the -// client's request for the ListAccounts operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAccounts for more information on using the ListAccounts -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAccountsRequest method. -// req, resp := client.ListAccountsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts -func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { - op := &request.Operation{ - Name: opListAccounts, - HTTPMethod: "GET", - HTTPPath: "/assignment/accounts", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListAccountsInput{} - } - - output = &ListAccountsOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// ListAccounts API operation for AWS Single Sign-On. -// -// Lists all AWS accounts assigned to the user. These AWS accounts are assigned -// by the administrator of the account. For more information, see Assign User -// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the AWS SSO User Guide. This operation returns a paginated response. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation ListAccounts for usage and error information. -// -// Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// * ResourceNotFoundException -// The specified resource doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts -func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { - req, out := c.ListAccountsRequest(input) - return out, req.Send() -} - -// ListAccountsWithContext is the same as ListAccounts with the addition of -// the ability to pass a context and additional request options. -// -// See ListAccounts for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { - req, out := c.ListAccountsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAccountsPages iterates over the pages of a ListAccounts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListAccounts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAccounts operation. -// pageNum := 0 -// err := client.ListAccountsPages(params, -// func(page *sso.ListAccountsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { - return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAccountsPagesWithContext same as ListAccountsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAccountsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAccountsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opLogout = "Logout" - -// LogoutRequest generates a "aws/request.Request" representing the -// client's request for the Logout operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Logout for more information on using the Logout -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the LogoutRequest method. 
-// req, resp := client.LogoutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout -func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { - op := &request.Operation{ - Name: opLogout, - HTTPMethod: "POST", - HTTPPath: "/logout", - } - - if input == nil { - input = &LogoutInput{} - } - - output = &LogoutOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// Logout API operation for AWS Single Sign-On. -// -// Removes the client- and server-side session that is associated with the user. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Single Sign-On's -// API operation Logout for usage and error information. -// -// Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -// -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -// -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout -func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { - req, out := c.LogoutRequest(input) - return out, req.Send() -} - -// LogoutWithContext is the same as Logout with the addition of -// the ability to pass a context and additional request options. -// -// See Logout for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { - req, out := c.LogoutRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Provides information about your AWS account. -type AccountInfo struct { - _ struct{} `type:"structure"` - - // The identifier of the AWS account that is assigned to the user. - AccountId *string `locationName:"accountId" type:"string"` - - // The display name of the AWS account that is assigned to the user. - AccountName *string `locationName:"accountName" type:"string"` - - // The email address of the AWS account that is assigned to the user. - EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccountInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AccountInfo) GoString() string { - return s.String() -} - -// SetAccountId sets the AccountId field's value. -func (s *AccountInfo) SetAccountId(v string) *AccountInfo { - s.AccountId = &v - return s -} - -// SetAccountName sets the AccountName field's value. -func (s *AccountInfo) SetAccountName(v string) *AccountInfo { - s.AccountName = &v - return s -} - -// SetEmailAddress sets the EmailAddress field's value. -func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { - s.EmailAddress = &v - return s -} - -type GetRoleCredentialsInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetRoleCredentialsInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // The identifier for the AWS account that is assigned to the user. - // - // AccountId is a required field - AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` - - // The friendly name of the role that is assigned to the user. - // - // RoleName is a required field - RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetRoleCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.RoleName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { - s.AccessToken = &v - return s -} - -// SetAccountId sets the AccountId field's value. -func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { - s.AccountId = &v - return s -} - -// SetRoleName sets the RoleName field's value. 
-func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { - s.RoleName = &v - return s -} - -type GetRoleCredentialsOutput struct { - _ struct{} `type:"structure"` - - // The credentials for the role that is assigned to the user. - RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetRoleCredentialsOutput) GoString() string { - return s.String() -} - -// SetRoleCredentials sets the RoleCredentials field's value. -func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { - s.RoleCredentials = v - return s -} - -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. -type InvalidRequestException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) GoString() string { - return s.String() -} - -func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { - return &InvalidRequestException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidRequestException) Code() string { - return "InvalidRequestException" -} - -// Message returns the exception's message. -func (s *InvalidRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRequestException) OrigErr() error { - return nil -} - -func (s *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRequestException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidRequestException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ListAccountRolesInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. 
For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ListAccountRolesInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // The identifier for the AWS account that is assigned to the user. - // - // AccountId is a required field - AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` - - // The number of items that clients can request per page. - MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` - - // The page token from the previous response output when you request subsequent - // pages. - NextToken *string `location:"querystring" locationName:"next_token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAccountRolesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { - s.AccessToken = &v - return s -} - -// SetAccountId sets the AccountId field's value. -func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { - s.AccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { - s.NextToken = &v - return s -} - -type ListAccountRolesOutput struct { - _ struct{} `type:"structure"` - - // The page token client that is used to retrieve the list of accounts. - NextToken *string `locationName:"nextToken" type:"string"` - - // A paginated response with the list of roles and the next token if more results - // are available. - RoleList []*RoleInfo `locationName:"roleList" type:"list"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountRolesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { - s.NextToken = &v - return s -} - -// SetRoleList sets the RoleList field's value. -func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { - s.RoleList = v - return s -} - -type ListAccountsInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by ListAccountsInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - - // This is the number of items clients can request per page. - MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` - - // (Optional) When requesting subsequent pages, this is the page token from - // the previous response output. - NextToken *string `location:"querystring" locationName:"next_token" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAccountsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { - s.AccessToken = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { - s.NextToken = &v - return s -} - -type ListAccountsOutput struct { - _ struct{} `type:"structure"` - - // A paginated response with the list of account information and the next token - // if more results are available. - AccountList []*AccountInfo `locationName:"accountList" type:"list"` - - // The page token client that is used to retrieve the list of accounts. - NextToken *string `locationName:"nextToken" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAccountsOutput) GoString() string { - return s.String() -} - -// SetAccountList sets the AccountList field's value. -func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { - s.AccountList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { - s.NextToken = &v - return s -} - -type LogoutInput struct { - _ struct{} `type:"structure" nopayload:"true"` - - // The token issued by the CreateToken API call. For more information, see CreateToken - // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. - // - // AccessToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by LogoutInput's - // String and GoString methods. - // - // AccessToken is a required field - AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LogoutInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} - if s.AccessToken == nil { - invalidParams.Add(request.NewErrParamRequired("AccessToken")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessToken sets the AccessToken field's value. -func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { - s.AccessToken = &v - return s -} - -type LogoutOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LogoutOutput) GoString() string { - return s.String() -} - -// The specified resource doesn't exist. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) GoString() string { - return s.String() -} - -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. -func (s *ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ResourceNotFoundException) OrigErr() error { - return nil -} - -func (s *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ResourceNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Provides information about the role credentials that are assigned to the -// user. -type RoleCredentials struct { - _ struct{} `type:"structure"` - - // The identifier used for the temporary security credentials. For more information, - // see Using Temporary Security Credentials to Request Access to AWS Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - AccessKeyId *string `locationName:"accessKeyId" type:"string"` - - // The date on which temporary security credentials expire. - Expiration *int64 `locationName:"expiration" type:"long"` - - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - // - // SecretAccessKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by RoleCredentials's - // String and GoString methods. 
- SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` - - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. - // - // SessionToken is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by RoleCredentials's - // String and GoString methods. - SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleCredentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleCredentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { - s.SessionToken = &v - return s -} - -// Provides information about the role that is assigned to the user. -type RoleInfo struct { - _ struct{} `type:"structure"` - - // The identifier of the AWS account assigned to the user. - AccountId *string `locationName:"accountId" type:"string"` - - // The friendly name of the role that is assigned to the user. - RoleName *string `locationName:"roleName" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RoleInfo) GoString() string { - return s.String() -} - -// SetAccountId sets the AccountId field's value. -func (s *RoleInfo) SetAccountId(v string) *RoleInfo { - s.AccountId = &v - return s -} - -// SetRoleName sets the RoleName field's value. -func (s *RoleInfo) SetRoleName(v string) *RoleInfo { - s.RoleName = &v - return s -} - -// Indicates that the request is being made too frequently and is more than -// what the server can handle. 
-type TooManyRequestsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) GoString() string { - return s.String() -} - -func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { - return &TooManyRequestsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TooManyRequestsException) Code() string { - return "TooManyRequestsException" -} - -// Message returns the exception's message. -func (s *TooManyRequestsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TooManyRequestsException) OrigErr() error { - return nil -} - -func (s *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TooManyRequestsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TooManyRequestsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. -type UnauthorizedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnauthorizedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnauthorizedException) GoString() string { - return s.String() -} - -func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { - return &UnauthorizedException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *UnauthorizedException) Code() string { - return "UnauthorizedException" -} - -// Message returns the exception's message. -func (s *UnauthorizedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *UnauthorizedException) OrigErr() error { - return nil -} - -func (s *UnauthorizedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *UnauthorizedException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *UnauthorizedException) RequestID() string { - return s.RespMetadata.RequestID -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go deleted file mode 100644 index 92d82b2af..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sso provides the client and types for making API -// requests to AWS Single Sign-On. -// -// AWS Single Sign-On Portal is a web service that makes it easy for you to -// assign user access to AWS SSO resources such as the user portal. Users can -// get AWS account applications and roles assigned to them and get federated -// into the application. -// -// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the AWS SSO User Guide. -// -// This API reference guide describes the AWS SSO Portal operations that you -// can call programatically and includes detailed information on data types -// and errors. -// -// AWS provides SDKs that consist of libraries and sample code for various programming -// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs -// provide a convenient way to create programmatic access to AWS SSO and other -// AWS services. For more information about the AWS SDKs, including how to download -// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service. -// -// See sso package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/ -// -// Using the Client -// -// To contact AWS Single Sign-On with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Single Sign-On client SSO for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New -package sso diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go deleted file mode 100644 index 77a6792e3..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sso - -import ( - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - - // ErrCodeInvalidRequestException for service response error code - // "InvalidRequestException". - // - // Indicates that a problem occurred with the input to the request. 
For example, - // a required parameter might be missing or out of range. - ErrCodeInvalidRequestException = "InvalidRequestException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // The specified resource doesn't exist. - ErrCodeResourceNotFoundException = "ResourceNotFoundException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". - // - // Indicates that the request is being made too frequently and is more than - // what the server can handle. - ErrCodeTooManyRequestsException = "TooManyRequestsException" - - // ErrCodeUnauthorizedException for service response error code - // "UnauthorizedException". - // - // Indicates that the request is not authorized. This can happen due to an invalid - // access token in the request. - ErrCodeUnauthorizedException = "UnauthorizedException" -) - -var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "InvalidRequestException": newErrorInvalidRequestException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "TooManyRequestsException": newErrorTooManyRequestsException, - "UnauthorizedException": newErrorUnauthorizedException, -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go deleted file mode 100644 index 7a28dc797..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sso - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restjson" -) - -// SSO provides the API operation methods for making requests to -// AWS Single Sign-On. See this package's package overview docs -// for details on the service. -// -// SSO methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type SSO struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "SSO" // Name of service. - EndpointsID = "portal.sso" // ID to lookup a service endpoint with. - ServiceID = "SSO" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the SSO client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a SSO client from just a session. -// svc := sso.New(mySession) -// -// // Create a SSO client with additional configuration -// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { - c := p.ClientConfig(EndpointsID, cfgs...) 
- if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = "awsssoportal" - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO { - svc := &SSO{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2019-06-10", - ResolvedRegion: resolvedRegion, - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed( - protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), - ) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a SSO operation and runs any -// custom request initialization. -func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go deleted file mode 100644 index 4cac247c1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package ssoiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sso" -) - -// SSOAPI provides an interface to enable mocking the -// sso.SSO service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Single Sign-On. -// func myFunc(svc ssoiface.SSOAPI) bool { -// // Make svc.GetRoleCredentials request -// } -// -// func main() { -// sess := session.New() -// svc := sso.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockSSOClient struct { -// ssoiface.SSOAPI -// } -// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSSOClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type SSOAPI interface { - GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) - GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error) - GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput) - - ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error) - ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error) - ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput) - - ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error - ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error - - ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error) - ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error) - ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput) - - ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error - ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error - - Logout(*sso.LogoutInput) (*sso.LogoutOutput, error) - LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error) - LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput) -} - -var _ SSOAPI = (*sso.SSO)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go deleted file mode 100644 index 1e7fa6557..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ /dev/null @@ -1,3437 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -const opAssumeRole = "AssumeRole" - -// AssumeRoleRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRole for more information on using the AssumeRole -// API call, and error handling. 
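The ssoiface doc comment above only outlines its mocking pattern. For reviewers unfamiliar with the aws-sdk-go v1 surface this patch deletes, here is a minimal, self-contained sketch of that pattern against the removed packages; the fetchAccessKey helper and all credential values are illustrative placeholders, not anything from autograph or sops.

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sso"
	"github.com/aws/aws-sdk-go/service/sso/ssoiface"
)

// mockSSOClient embeds ssoiface.SSOAPI so only the method under test
// needs a stub; any other SSOAPI method would panic if called.
type mockSSOClient struct {
	ssoiface.SSOAPI
}

// GetRoleCredentials returns canned credentials instead of calling AWS SSO.
func (m *mockSSOClient) GetRoleCredentials(in *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
	return &sso.GetRoleCredentialsOutput{
		RoleCredentials: &sso.RoleCredentials{
			AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
		},
	}, nil
}

// fetchAccessKey depends on the SSOAPI interface rather than *sso.SSO,
// which is what lets a test substitute the mock above. The helper name
// and input values are placeholders for illustration.
func fetchAccessKey(svc ssoiface.SSOAPI) (string, error) {
	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
		AccessToken: aws.String("example-token"),
		AccountId:   aws.String("123456789012"),
		RoleName:    aws.String("ReadOnly"),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.RoleCredentials.AccessKeyId), nil
}

func main() {
	key, err := fetchAccessKey(&mockSSOClient{})
	if err != nil {
		panic(err)
	}
	fmt.Println(key)
}
```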
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { - op := &request.Operation{ - Name: opAssumeRole, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleInput{} - } - - output = &AssumeRoleOutput{} - req = c.newRequest(op, input, output) - return -} - -// AssumeRole API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access. For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any Amazon Web Services service with the following exception: -// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken -// API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// When you create a role, you create two policies: A role trust policy that -// specifies who can assume the role and a permissions policy that specifies -// what can be done with the role. You specify the trusted principal who is -// allowed to assume the role in the role trust policy. -// -// To assume a role from a different account, your Amazon Web Services account -// must be trusted by the role. The trust relationship is defined in the role's -// trust policy when the role is created. 
That trust policy states which accounts -// are allowed to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. -// -// To allow a user to assume a role in the same account, you can do either of -// the following: -// -// * Attach a policy to the user that allows the user to call AssumeRole -// (as long as the role's trust policy trusts the account). -// -// * Add the user as a principal directly in the role's trust policy. -// -// You can do either because the role’s trust policy acts as an IAM resource-based -// policy. When a resource-based policy grants access to a principal in the -// same account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see Passing -// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an Amazon -// Web Services MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication. If the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. -// -// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA device produces. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRole for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole -func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - return out, req.Send() -} - -// AssumeRoleWithContext is the same as AssumeRole with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRole for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { - req, out := c.AssumeRoleRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithSAML = "AssumeRoleWithSAML" - -// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
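The request/Send form documented above is the generated low-level pattern; callers typically used the AssumeRole convenience wrapper that this deleted file also defines. A minimal sketch of that call against the removed v1 STS client follows; the role ARN, session name, and duration are placeholders.

```
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Create the v1 STS client from a shared session, as the deleted docs describe.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// AssumeRole wraps AssumeRoleRequest plus Send; input values are placeholders.
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(900), // 15 minutes
	})
	if err != nil {
		log.Fatal(err)
	}

	// The temporary credentials are returned in out.Credentials.
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
```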
-// -// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithSAML, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithSAMLInput{} - } - - output = &AssumeRoleWithSAMLOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithSAML API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based Amazon Web -// Services access without user-specific credentials or configuration. For a -// comparison of AssumeRoleWithSAML with the other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of -// an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to Amazon Web -// Services services. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. You can provide -// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour -// to 12 hours. To learn how to view the maximum value for your role, see View -// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. 
-// -// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one -// hour. When you use the AssumeRole API operation to assume a role, you can -// specify the duration of your role session with the DurationSeconds parameter. -// You can specify a parameter value of up to 43200 seconds (12 hours), depending -// on the maximum session duration setting for your role. However, if you assume -// a role using role chaining and provide a DurationSeconds parameter value -// greater than one hour, the operation fails. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any Amazon Web Services service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services -// security credentials. The identity of the caller is validated by using keys -// in the metadata document that is uploaded for the SAML provider entity for -// your identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. -// The entry includes the value in the NameID element of the SAML assertion. -// We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the persistent -// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML assertion -// as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. 
-// -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, session tags override the role's tags with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// SAML Configuration -// -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. -// Additionally, you must use Identity and Access Management (IAM) to create -// a SAML provider entity in your Amazon Web Services account that represents -// your identity provider. You must also create an IAM role that specifies this -// SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithSAML for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML -func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - return out, req.Send() -} - -// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithSAML for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { - req, out := c.AssumeRoleWithSAMLRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" - -// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the -// client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity -// API call, and error handling. 
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { - op := &request.Operation{ - Name: opAssumeRoleWithWebIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &AssumeRoleWithWebIdentityInput{} - } - - output = &AssumeRoleWithWebIdentityOutput{} - req = c.newRequest(op, input, output) - req.Config.Credentials = credentials.AnonymousCredentials - return -} - -// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide -// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android -// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify -// a user. You can also supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web -// Services security credentials. Therefore, you can distribute an application -// (for example, on mobile devices) that requests temporary security credentials -// without including long-term Amazon Web Services credentials in the application. -// You also don't need to deploy server-based proxy services that use long-term -// Amazon Web Services credentials. Instead, the identity of the caller is validated -// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to Amazon Web Services service -// API operations. 
-// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithWebIdentity -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// -// Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any Amazon Web Services service with the following -// exception: you cannot call the STS GetFederationToken or GetSessionToken -// API operations. -// -// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. -// -// Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see Passing Session Tags -// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t -// exceed 128 characters and the values can’t exceed 256 characters. For these -// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. 
-// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. -// -// You can pass a session tag with the same key as a tag that is attached to -// the role. When you do, the session tag overrides the role tag with the same -// key. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during -// role chaining. For more information, see Chaining Roles with Session Tags -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. -// -// Identities -// -// Before your application can call AssumeRoleWithWebIdentity, you must have -// an identity token from a supported identity provider and create a role that -// the application can assume. The role that your application assumes must trust -// the identity provider that is associated with the identity token. In other -// words, the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail -// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you -// could instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). -// -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity -// API, see the following resources: -// -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to Amazon Web Services. -// -// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation AssumeRoleWithWebIdentity for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity -func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - return out, req.Send() -} - -// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of -// the ability to pass a context and additional request options. -// -// See AssumeRoleWithWebIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { - req, out := c.AssumeRoleWithWebIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" - -// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the -// client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { - op := &request.Operation{ - Name: opDecodeAuthorizationMessage, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecodeAuthorizationMessageInput{} - } - - output = &DecodeAuthorizationMessageOutput{} - req = c.newRequest(op, input, output) - return -} - -// DecodeAuthorizationMessage API operation for AWS Security Token Service. -// -// Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an Amazon Web Services request. -// -// For example, if a user is not authorized to perform an operation that he -// or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some Amazon Web Services operations additionally -// return an encoded message that can provide details about this authorization -// failure. -// -// Only certain Amazon Web Services operations return an encoded authorization -// message. 
The documentation for an individual operation indicates whether -// that operation returns an encoded message in addition to returning an HTTP -// code. -// -// The message is encoded because the details of the authorization status can -// contain privileged information that the user who requested the operation -// should not see. To decode an authorization status message, a user must be -// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) -// action. -// -// The decoded message includes the following type of information: -// -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. -// -// * The principal who made the request. -// -// * The requested action. -// -// * The requested resource. -// -// * The values of condition keys in the context of the user's request. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation DecodeAuthorizationMessage for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage -func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - return out, req.Send() -} - -// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of -// the ability to pass a context and additional request options. -// -// See DecodeAuthorizationMessage for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { - req, out := c.DecodeAuthorizationMessageRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetAccessKeyInfo = "GetAccessKeyInfo" - -// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessKeyInfo operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
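The Request/Send split documented above is the same for every generated operation in this file: the request is built first, options or a context are attached, and the output struct is only valid once `Send` returns nil. A minimal sketch of that pattern using `GetAccessKeyInfoRequest`, reusing the imports from the earlier sketch; the access key ID is the documentation placeholder:

```
// accountForAccessKey resolves which account owns an access key ID using
// the explicit request/Send form, so a context can be attached first.
func accountForAccessKey(ctx aws.Context, svc *sts.STS, keyID string) (string, error) {
	req, out := svc.GetAccessKeyInfoRequest(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String(keyID), // e.g. "AKIAIOSFODNN7EXAMPLE" (placeholder)
	})
	req.SetContext(ctx)

	// out is populated only after Send returns without error.
	if err := req.Send(); err != nil {
		return "", err
	}
	return aws.StringValue(out.Account), nil
}
```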
-// -// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { - op := &request.Operation{ - Name: opGetAccessKeyInfo, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetAccessKeyInfoInput{} - } - - output = &GetAccessKeyInfoOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetAccessKeyInfo API operation for AWS Security Token Service. -// -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) -// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). -// For more information about access keys, see Managing Access Keys for IAM -// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// Amazon Web Services account to which the keys belong. Access key IDs beginning -// with AKIA are long-term credentials for an IAM user or the Amazon Web Services -// account root user. Access key IDs beginning with ASIA are temporary credentials -// that are created using STS operations. If the account in the response belongs -// to you, you can sign in as the root user and review your root user access -// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might -// be active, inactive, or deleted. Active keys might not have permissions to -// perform an operation. Providing a deleted access key might return an error -// that the key doesn't exist. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetAccessKeyInfo for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo -func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - return out, req.Send() -} - -// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of -// the ability to pass a context and additional request options. -// -// See GetAccessKeyInfo for details on how to use this API operation. 
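Each operation also gets a `*WithContext` convenience wrapper that performs the same build-and-Send in one call and accepts extra request options. A one-line sketch under the same assumptions as the snippets above:

```
// Same lookup as above, but letting the SDK drive the request lifecycle.
out, err := svc.GetAccessKeyInfoWithContext(aws.BackgroundContext(), &sts.GetAccessKeyInfoInput{
	AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // placeholder key ID from the docs above
})
if err != nil {
	log.Fatal(err)
}
// Only the owning account is reported; the key's state (active/inactive) is not.
fmt.Println("owning account:", aws.StringValue(out.Account))
```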
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { - req, out := c.GetAccessKeyInfoRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCallerIdentity = "GetCallerIdentity" - -// GetCallerIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCallerIdentity for more information on using the GetCallerIdentity -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { - op := &request.Operation{ - Name: opGetCallerIdentity, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCallerIdentityInput{} - } - - output = &GetCallerIdentityOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCallerIdentity API operation for AWS Security Token Service. -// -// Returns details about the IAM user or role whose credentials are used to -// call the operation. -// -// No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetCallerIdentity for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity -func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - return out, req.Send() -} - -// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of -// the ability to pass a context and additional request options. 
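Because no permissions are required, `GetCallerIdentity` is the standard "which credentials am I actually using" probe. A minimal sketch with the same client and imports as above:

```
out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
	log.Fatal(err)
}
// Account, Arn, and UserId identify the caller behind the current credentials.
fmt.Printf("account=%s arn=%s user-id=%s\n",
	aws.StringValue(out.Account),
	aws.StringValue(out.Arn),
	aws.StringValue(out.UserId))
```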
-// -// See GetCallerIdentity for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { - req, out := c.GetCallerIdentityRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetFederationToken = "GetFederationToken" - -// GetFederationTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetFederationToken for more information on using the GetFederationToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { - op := &request.Operation{ - Name: opGetFederationToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetFederationTokenInput{} - } - - output = &GetFederationTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetFederationToken API operation for AWS Security Token Service. -// -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. 
-// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials obtained -// by using the Amazon Web Services account root user credentials have a maximum -// duration of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: -// -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. 
-// -// You can create a mobile-based or browser-based app that can authenticate -// users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider. In this case, we recommend -// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. -// For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. -// -// An administrator must grant you the permissions necessary to pass session -// tags. The administrator can also create granular permissions to allow you -// to pass only specific session tags. For more information, see Tutorial: Using -// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This -// means that you cannot have separate Department and department tag keys. Assume -// that the user that you are federating has the Department=Marketing tag and -// you pass the department=engineering session tag. Department and department -// are not saved as separate tags, and the session tag passed in the request -// takes precedence over the user tag. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetFederationToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
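A broker application holding long-term IAM user credentials would call `GetFederationToken` roughly as below, scoping the vended credentials with an inline session policy and optional session tags. The federated user name, policy JSON, and tag are illustrative placeholders; same imports and client as the first sketch:

```
out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
	Name:            aws.String("example-federated-user"), // placeholder
	DurationSeconds: aws.Int64(3600),
	// Without a session policy, the federated session has no permissions.
	Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
	Tags: []*sts.Tag{
		{Key: aws.String("Department"), Value: aws.String("Engineering")}, // placeholder session tag
	},
})
if err != nil {
	log.Fatal(err)
}
creds := out.Credentials
fmt.Println(aws.StringValue(creds.AccessKeyId), aws.TimeValue(creds.Expiration))
```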
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken -func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - return out, req.Send() -} - -// GetFederationTokenWithContext is the same as GetFederationToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetFederationToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { - req, out := c.GetFederationTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionToken = "GetSessionToken" - -// GetSessionTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionToken for more information on using the GetSessionToken -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { - op := &request.Operation{ - Name: opGetSessionToken, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionTokenInput{} - } - - output = &GetSessionTokenOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionToken API operation for AWS Security Token Service. -// -// Returns a set of temporary credentials for an Amazon Web Services account -// or IAM user. The credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use GetSessionToken if you want -// to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. 
For a comparison of GetSessionToken -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. -// -// Session Duration -// -// The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. -// -// Permissions -// -// The temporary security credentials created by GetSessionToken can be used -// to make API calls to any Amazon Web Services service with the following exceptions: -// -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. -// -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. -// -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. -// -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, -// if GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. -// -// For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Security Token Service's -// API operation GetSessionToken for usage and error information. -// -// Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
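The MFA flow described above reduces to passing the device serial number and the current code to `GetSessionToken`. A minimal sketch, again with the client and imports from the first snippet; the device ARN and code are placeholders:

```
out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
	DurationSeconds: aws.Int64(3600),
	SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
	TokenCode:       aws.String("123456"),                                     // placeholder
})
if err != nil {
	log.Fatal(err)
}
// The returned access key, secret key, and session token are what
// subsequent MFA-gated API calls must be signed with.
fmt.Println(aws.StringValue(out.Credentials.SessionToken))
```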
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken -func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - return out, req.Send() -} - -// GetSessionTokenWithContext is the same as GetSessionToken with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionToken for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { - req, out := c.GetSessionTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type AssumeRoleInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value specified can range - // from 900 seconds (15 minutes) up to the maximum session duration set for - // the role. The maximum session duration setting can have a value from 1 hour - // to 12 hours. If you specify a value higher than this setting or the administrator - // setting (whichever is lower), the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. - // - // Role chaining limits your Amazon Web Services CLI or Amazon Web Services - // API role session to a maximum of one hour. When you use the AssumeRole API - // operation to assume a role, you can specify the duration of your role session - // with the DurationSeconds parameter. You can specify a parameter value of - // up to 43200 seconds (12 hours), depending on the maximum session duration - // setting for your role. However, if you assume a role using role chaining - // and provide a DurationSeconds parameter value greater than one hour, the - // operation fails. To learn how to view the maximum value for your role, see - // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. 
A cross-account - // role is usually set up to trust everyone in an account. Therefore, the administrator - // of the trusting account might send an external ID to the administrator of - // the trusted account. That way, only someone with the ID can assume the role, - // rather than everyone in the account. For more information about the external - // ID, see How to Use an External ID When Granting Access to Your Amazon Web - // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - ExternalId *string `min:"2" type:"string"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. 
- // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the role to assume. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. - // - // Use the role session name to uniquely identify a session when the same role - // is assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the - // account that owns the role. The role session name is also used in the ARN - // of the assumed role principal. This means that subsequent cross-account API - // requests that use the temporary security credentials will expose the role - // session name to the external account in their CloudTrail logs. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SerialNumber *string `min:"9" type:"string"` - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition - // key to further control access to Amazon Web Services resources based on the - // value of source identity. For more information about using source identity, - // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@-. You cannot - // use a value that begins with the text aws:. This prefix is reserved for Amazon - // Web Services internal use. - SourceIdentity *string `min:"2" type:"string"` - - // A list of session tags that you want to pass. Each session tag consists of - // a key name and an associated value. For more information about session tags, - // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters, and the values can’t exceed - // 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same - // key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - // - // Additionally, if you used temporary credentials to perform this operation, - // the new session inherits any transitive session tags from the calling session. - // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) - // in the IAM User Guide. - Tags []*Tag `type:"list"` - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA. (In other words, if the policy includes a condition - // that tests for MFA). If the role being assumed requires MFA and if the TokenCode - // value is missing or expired, the AssumeRole call returns an "access denied" - // error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` - - // A list of keys for session tags that you want to set as transitive. If you - // set a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. - // - // This parameter is optional. 
When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. - // - // If you choose not to specify a transitive tag key, then no tags are passed - // from this session to any subsequent sessions. - TransitiveTagKeys []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.ExternalId != nil && len(*s.ExternalId) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { - invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { - s.DurationSeconds = &v - return s -} - -// SetExternalId sets the ExternalId field's value. -func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { - s.ExternalId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. 
-func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { - s.PolicyArns = v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { - s.RoleSessionName = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { - s.SerialNumber = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { - s.SourceIdentity = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { - s.Tags = v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { - s.TokenCode = &v - return s -} - -// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. -func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { - s.TransitiveTagKeys = v - return s -} - -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. -type AssumeRoleOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition - // key to further control access to Amazon Web Services resources based on the - // value of source identity. For more information about using source identity, - // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. 
- // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { - s.AssumedRoleUser = v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { - s.PackedPolicySize = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { - s.SourceIdentity = &v - return s -} - -type AssumeRoleWithSAMLInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. Your role session lasts for - // the duration that you specify for the DurationSeconds parameter, or until - // the time specified in the SAML authentication response's SessionNotOnOrAfter - // value, whichever is shorter. You can provide a DurationSeconds value from - // 900 seconds (15 minutes) up to the maximum session duration setting for the - // role. This setting can have a value from 1 hour to 12 hours. If you specify - // a value higher than this setting, the operation fails. For example, if you - // specify a session duration of 12 hours, but your administrator set the maximum - // session duration to 6 hours, your operation fails. To learn how to view the - // maximum value for your role, see View the Maximum Session Duration Setting - // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. 
Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes - // the IdP. - // - // PrincipalArn is a required field - PrincipalArn *string `min:"20" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. 
- // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // The base64 encoded SAML authentication response provided by the IdP. - // - // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. - // - // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssumeRoleWithSAMLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PrincipalArn == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) - } - if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.SAMLAssertion == nil { - invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) - } - if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { - invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { - s.PolicyArns = v - return s -} - -// SetPrincipalArn sets the PrincipalArn field's value. -func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { - s.PrincipalArn = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { - s.RoleArn = &v - return s -} - -// SetSAMLAssertion sets the SAMLAssertion field's value. 
-func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { - s.SAMLAssertion = &v - return s -} - -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. -type AssumeRoleWithSAMLOutput struct { - _ struct{} `type:"structure"` - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The value of the Recipient attribute of the SubjectConfirmationData element - // of the SAML assertion. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // The value of the Issuer element of the SAML assertion. - Issuer *string `type:"string"` - - // A hash value based on the concatenation of the following: - // - // * The Issuer response value. - // - // * The Amazon Web Services account ID. - // - // * The friendly name (the last part of the ARN) of the SAML provider in - // IAM. - // - // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" - // ) ) - NameQualifier *string `type:"string"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The value in the SourceIdentity attribute in the SAML assertion. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with - // that user. After the source identity is set, the value cannot be changed. - // It is present in the request for all actions that are taken by the role and - // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity - // when calling AssumeRoleWithSAML. You do this by adding an attribute to the - // SAML assertion. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` - - // The value of the NameID element in the Subject element of the SAML assertion. 
- Subject *string `type:"string"` - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient - // or persistent. - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, - // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient - // is returned as transient. If the format includes any other prefix, the format - // is returned with no modifications. - SubjectType *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithSAMLOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { - s.Credentials = v - return s -} - -// SetIssuer sets the Issuer field's value. -func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { - s.Issuer = &v - return s -} - -// SetNameQualifier sets the NameQualifier field's value. -func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { - s.NameQualifier = &v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { - s.PackedPolicySize = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { - s.SourceIdentity = &v - return s -} - -// SetSubject sets the Subject field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { - s.Subject = &v - return s -} - -// SetSubjectType sets the SubjectType field's value. -func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { - s.SubjectType = &v - return s -} - -type AssumeRoleWithWebIdentityInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. 
To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request - // to the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. - DurationSeconds *int64 `min:"900" type:"integer"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent Amazon Web Services API calls - // to access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as managed session policies. The policies must exist in the same account - // as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. 
- // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent Amazon Web Services API calls to access resources in the account - // that owns the role. You cannot use session policies to grant more permissions - // than those allowed by the identity-based policy of the role that is being - // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // The fully qualified host component of the domain name of the identity provider. - // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string `min:"4" type:"string"` - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` - - // An identifier for the assumed role session. Typically, you pass the name - // or identifier that is associated with the user who is using your application. - // That way, the temporary security credentials that your application will use - // are associated with that user. This session name is included as part of the - // ARN and assumed role ID in the AssumedRoleUser response element. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - // - // RoleSessionName is a required field - RoleSessionName *string `min:"2" type:"string" required:"true"` - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by - // the identity provider. Your application must get this token by authenticating - // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. - // - // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AssumeRoleWithWebIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ProviderId != nil && len(*s.ProviderId) < 4 { - invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) - } - if s.RoleSessionName == nil { - invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) - } - if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) - } - if s.WebIdentityToken == nil { - invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) - } - if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { - invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { - s.DurationSeconds = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { - s.PolicyArns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { - s.ProviderId = &v - return s -} - -// SetRoleArn sets the RoleArn field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { - s.RoleArn = &v - return s -} - -// SetRoleSessionName sets the RoleSessionName field's value. -func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { - s.RoleSessionName = &v - return s -} - -// SetWebIdentityToken sets the WebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { - s.WebIdentityToken = &v - return s -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. -type AssumeRoleWithWebIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. - // For example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. 
The ARN and ID include the RoleSessionName - // that you specified when you called AssumeRole. - AssumedRoleUser *AssumedRoleUser `type:"structure"` - - // The intended audience (also known as client ID) of the web identity token. - // This is traditionally the client identifier issued to the application that - // requested the web identity token. - Audience *string `type:"string"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed - // in the AssumeRoleWithWebIdentity request. - Provider *string `type:"string"` - - // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with - // that user. After the source identity is set, the value cannot be changed. - // It is present in the request for all actions that are taken by the role and - // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute associated - // with your users, like user name or email, as the source identity when calling - // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web - // token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- - SourceIdentity *string `min:"2" type:"string"` - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with - // the AssumeRoleWithWebIdentity call. The identifier is typically unique to - // the user and the application that acquired the WebIdentityToken (pairwise - // identifier). For OpenID Connect ID tokens, this field contains the value - // returned by the identity provider as the token's sub (Subject) claim. 
- SubjectFromWebIdentityToken *string `min:"6" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumeRoleWithWebIdentityOutput) GoString() string { - return s.String() -} - -// SetAssumedRoleUser sets the AssumedRoleUser field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { - s.AssumedRoleUser = v - return s -} - -// SetAudience sets the Audience field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { - s.Audience = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { - s.Credentials = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { - s.PackedPolicySize = &v - return s -} - -// SetProvider sets the Provider field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { - s.Provider = &v - return s -} - -// SetSourceIdentity sets the SourceIdentity field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { - s.SourceIdentity = &v - return s -} - -// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. -func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { - s.SubjectFromWebIdentityToken = &v - return s -} - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - _ struct{} `type:"structure"` - - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by Amazon Web Services - // when the role is created. - // - // AssumedRoleId is a required field - AssumedRoleId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumedRoleUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AssumedRoleUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { - s.Arn = &v - return s -} - -// SetAssumedRoleId sets the AssumedRoleId field's value. -func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { - s.AssumedRoleId = &v - return s -} - -// Amazon Web Services credentials for API authentication. -type Credentials struct { - _ struct{} `type:"structure"` - - // The access key ID that identifies the temporary security credentials. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` - - // The date on which the current credentials expire. - // - // Expiration is a required field - Expiration *time.Time `type:"timestamp" required:"true"` - - // The secret access key that can be used to sign requests. - // - // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` - - // The token that users must pass to the service API to use the temporary credentials. - // - // SessionToken is a required field - SessionToken *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Credentials) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Credentials) GoString() string { - return s.String() -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *Credentials) SetAccessKeyId(v string) *Credentials { - s.AccessKeyId = &v - return s -} - -// SetExpiration sets the Expiration field's value. -func (s *Credentials) SetExpiration(v time.Time) *Credentials { - s.Expiration = &v - return s -} - -// SetSecretAccessKey sets the SecretAccessKey field's value. -func (s *Credentials) SetSecretAccessKey(v string) *Credentials { - s.SecretAccessKey = &v - return s -} - -// SetSessionToken sets the SessionToken field's value. -func (s *Credentials) SetSessionToken(v string) *Credentials { - s.SessionToken = &v - return s -} - -type DecodeAuthorizationMessageInput struct { - _ struct{} `type:"structure"` - - // The encoded message that was returned with the response. - // - // EncodedMessage is a required field - EncodedMessage *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DecodeAuthorizationMessageInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecodeAuthorizationMessageInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} - if s.EncodedMessage == nil { - invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) - } - if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncodedMessage sets the EncodedMessage field's value. -func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { - s.EncodedMessage = &v - return s -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an Amazon -// Web Services request. -type DecodeAuthorizationMessageOutput struct { - _ struct{} `type:"structure"` - - // The API returns a response with the decoded message. - DecodedMessage *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DecodeAuthorizationMessageOutput) GoString() string { - return s.String() -} - -// SetDecodedMessage sets the DecodedMessage field's value. -func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { - s.DecodedMessage = &v - return s -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - _ struct{} `type:"structure"` - - // The ARN that specifies the federated user that is associated with the credentials. - // For more information about ARNs and how to use them in policies, see IAM - // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. - // - // Arn is a required field - Arn *string `min:"20" type:"string" required:"true"` - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // FederatedUserId is a required field - FederatedUserId *string `min:"2" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FederatedUser) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s FederatedUser) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *FederatedUser) SetArn(v string) *FederatedUser { - s.Arn = &v - return s -} - -// SetFederatedUserId sets the FederatedUserId field's value. -func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { - s.FederatedUserId = &v - return s -} - -type GetAccessKeyInfoInput struct { - _ struct{} `type:"structure"` - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercase letter or digit. - // - // AccessKeyId is a required field - AccessKeyId *string `min:"16" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessKeyInfoInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} - if s.AccessKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) - } - if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { - invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccessKeyId sets the AccessKeyId field's value. -func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { - s.AccessKeyId = &v - return s -} - -type GetAccessKeyInfoOutput struct { - _ struct{} `type:"structure"` - - // The number used to identify the Amazon Web Services account. - Account *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetAccessKeyInfoOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { - s.Account = &v - return s -} - -type GetCallerIdentityInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetCallerIdentityInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityInput) GoString() string { - return s.String() -} - -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. -type GetCallerIdentityOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Web Services account ID number of the account that owns or contains - // the calling entity. - Account *string `type:"string"` - - // The Amazon Web Services ARN associated with the calling entity. - Arn *string `min:"20" type:"string"` - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed - // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. - UserId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCallerIdentityOutput) GoString() string { - return s.String() -} - -// SetAccount sets the Account field's value. -func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { - s.Account = &v - return s -} - -// SetArn sets the Arn field's value. -func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { - s.Arn = &v - return s -} - -// SetUserId sets the UserId field's value. -func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { - s.UserId = &v - return s -} - -type GetFederationTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon - // S3 bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. 
You can - // also include underscores or any of the following characters: =,.@- - // - // Name is a required field - Name *string `min:"2" type:"string" required:"true"` - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - Policy *string `min:"1" type:"string"` - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want - // to use as a managed session policy. The policies must exist in the same account - // as the IAM user that is requesting federated access. - // - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - // - // This parameter is optional. 
However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection - // of the IAM user policies and the session policies that you pass. This gives - // you a way to further restrict the permissions for a federated user. You cannot - // use session policies to grant more permissions than those that are defined - // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - // - // The resulting credentials can be used to access a resource that has a resource-based - // policy. If that policy specifically references the federated user session - // in the Principal element of the policy, the session has the permissions allowed - // by the policy. These permissions are granted in addition to the permissions - // that are granted by the session policies. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - PolicyArns []*PolicyDescriptorType `type:"list"` - - // A list of session tags. Each session tag consists of a key name and an associated - // value. For more information about session tags, see Passing Session Tags - // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters and the values can’t exceed - // 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user - // tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This - // means that you cannot have separate Department and department tag keys. Assume - // that the role has the Department=Marketing tag and you pass the department=engineering - // session tag. Department and department are not saved as separate tags, and - // the session tag passed in the request takes precedence over the role tag. - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetFederationTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFederationTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyArns != nil { - for i, v := range s.PolicyArns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { - s.DurationSeconds = &v - return s -} - -// SetName sets the Name field's value. -func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { - s.Name = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { - s.Policy = &v - return s -} - -// SetPolicyArns sets the PolicyArns field's value. -func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { - s.PolicyArns = v - return s -} - -// SetTags sets the Tags field's value. -func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { - s.Tags = v - return s -} - -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. -type GetFederationTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` - - // Identifiers for the federated user associated with the credentials (such - // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You - // can use the federated user's ARN in your resource-based policies, such as - // an Amazon S3 bucket policy. - FederatedUser *FederatedUser `type:"structure"` - - // A percentage value that indicates the packed size of the session policies - // and session tags combined passed in the request. 
The request fails if the - // packed size is greater than 100 percent, which means the policies and tags - // exceeded the allowed space. - PackedPolicySize *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetFederationTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { - s.Credentials = v - return s -} - -// SetFederatedUser sets the FederatedUser field's value. -func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { - s.FederatedUser = v - return s -} - -// SetPackedPolicySize sets the PackedPolicySize field's value. -func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { - s.PackedPolicySize = &v - return s -} - -type GetSessionTokenInput struct { - _ struct{} `type:"structure"` - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for Amazon Web Services account owners are restricted to a maximum of 3,600 - // seconds (one hour). If the duration is longer than one hour, the session - // for Amazon Web Services account owners defaults to one hour. - DurationSeconds *int64 `min:"900" type:"integer"` - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM - // user has a policy that requires MFA authentication. The value is either the - // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource - // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:/- - SerialNumber *string `min:"9" type:"string"` - - // The value provided by the MFA device, if MFA is required. If any policy requires - // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, the user must provide a code when requesting a set of temporary - // security credentials. A user who fails to provide the code receives an "access - // denied" response when requesting resources that require MFA authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string `min:"6" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionTokenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} - if s.DurationSeconds != nil && *s.DurationSeconds < 900 { - invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) - } - if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { - invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) - } - if s.TokenCode != nil && len(*s.TokenCode) < 6 { - invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDurationSeconds sets the DurationSeconds field's value. -func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { - s.DurationSeconds = &v - return s -} - -// SetSerialNumber sets the SerialNumber field's value. -func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { - s.SerialNumber = &v - return s -} - -// SetTokenCode sets the TokenCode field's value. -func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { - s.TokenCode = &v - return s -} - -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon -// Web Services requests. -type GetSessionTokenOutput struct { - _ struct{} `type:"structure"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - Credentials *Credentials `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionTokenOutput) GoString() string { - return s.String() -} - -// SetCredentials sets the Credentials field's value. -func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { - s.Credentials = v - return s -} - -// A reference to the IAM managed policy that is passed as a session policy -// for a role session or a federated user session. 
-type PolicyDescriptorType struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. - Arn *string `locationName:"arn" min:"20" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyDescriptorType) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PolicyDescriptorType) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PolicyDescriptorType) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} - if s.Arn != nil && len(*s.Arn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { - s.Arn = &v - return s -} - -// You can pass custom key-value pair attributes when you assume a role or federate -// a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go deleted file mode 100644 index d5307fcaa..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go +++ /dev/null @@ -1,11 +0,0 @@ -package sts - -import "github.com/aws/aws-sdk-go/aws/request" - -func init() { - initRequest = customizeRequest -} - -func customizeRequest(r *request.Request) { - r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go deleted file mode 100644 index 2d98d9235..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package sts provides the client and types for making API -// requests to AWS Security Token Service. -// -// Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. -// -// See sts package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ -// -// Using the Client -// -// To contact AWS Security Token Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Security Token Service client STS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go deleted file mode 100644 index b680bbd5d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. 
DO NOT EDIT. - -package sts - -const ( - - // ErrCodeExpiredTokenException for service response error code - // "ExpiredTokenException". - // - // The web identity token that was passed is expired or is not valid. Get a - // new identity token from the identity provider and then retry the request. - ErrCodeExpiredTokenException = "ExpiredTokenException" - - // ErrCodeIDPCommunicationErrorException for service response error code - // "IDPCommunicationError". - // - // The request could not be fulfilled because the identity provider (IDP) that - // was asked to verify the incoming identity token could not be reached. This - // is often a transient error caused by network conditions. Retry the request - // a limited number of times so that you don't exceed the request rate. If the - // error persists, the identity provider might be down or not responding. - ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" - - // ErrCodeIDPRejectedClaimException for service response error code - // "IDPRejectedClaim". - // - // The identity provider (IdP) reported that authentication failed. This might - // be because the claim is invalid. - // - // If this error is returned for the AssumeRoleWithWebIdentity operation, it - // can also mean that the claim has expired or has been explicitly revoked. - ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" - - // ErrCodeInvalidAuthorizationMessageException for service response error code - // "InvalidAuthorizationMessageException". - // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. - ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" - - // ErrCodeInvalidIdentityTokenException for service response error code - // "InvalidIdentityToken". - // - // The web identity token that was passed could not be validated by Amazon Web - // Services. Get a new identity token from the identity provider and then retry - // the request. - ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocument". - // - // The request was rejected because the policy document was malformed. The error - // message describes the specific error. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" - - // ErrCodePackedPolicyTooLargeException for service response error code - // "PackedPolicyTooLarge". - // - // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. An Amazon Web Services conversion - // compresses the session policy document, session policy ARNs, and session - // tags into a packed binary format that has a separate limit. The error message - // indicates by percentage how close the policies and tags are to the upper - // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. - // - // You could receive this error even though you meet other defined session policy - // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) - // in the IAM User Guide. 
- ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" - - // ErrCodeRegionDisabledException for service response error code - // "RegionDisabledException". - // - // STS is not activated in the requested region for the account that is being - // asked to generate credentials. The account administrator must use the IAM - // console to activate STS in that region. For more information, see Activating - // and Deactivating Amazon Web Services STS in an Amazon Web Services Region - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) - // in the IAM User Guide. - ErrCodeRegionDisabledException = "RegionDisabledException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go deleted file mode 100644 index 703defd96..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package sts - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/query" -) - -// STS provides the API operation methods for making requests to -// AWS Security Token Service. See this package's package overview docs -// for details on the service. -// -// STS methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type STS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "sts" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the STS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a STS client from just a session. -// svc := sts.New(mySession) -// -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS { - svc := &STS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", - ResolvedRegion: resolvedRegion, - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(query.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a STS operation and runs any -// custom request initialization. -func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go deleted file mode 100644 index e2e1d6efe..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package stsiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" -) - -// STSAPI provides an interface to enable mocking the -// sts.STS service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } -// -// func main() { -// sess := session.New() -// svc := sts.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. 
Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type STSAPI interface { - AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) - AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) - AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) - - AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) - AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) - - AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) - AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) - - DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) - DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) - - GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) - GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) - - GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) - GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) - - GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) - GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) - GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) - - GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) - GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) - GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) -} - -var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 000000000..102fb9a69 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) 
+env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 000000000..5ba5c86fc --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 000000000..08b2e4a3d --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. + +```go +import github.com/blang/semver +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? +----- + +- Fully spec compatible +- No reflection +- No regex +- Fully tested (Coverage >99%) +- Readable parsing/validation errors +- Fast (See [Benchmarks](#benchmarks)) +- Only Stdlib +- Uses values instead of pointers +- Many features, see below + + +Features +----- + +- Parsing and validation at all levels +- Comparator-like comparisons +- Compare Helper Methods +- InPlace manipulation +- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` +- Sortable (implements sort.Interface) +- database/sql compatible (sql.Scanner/Valuer) +- encoding/json compatible (json.Marshaler/Unmarshaler) + +Ranges +------ + +A `Range` is a set of conditions which specify which versions satisfy the range. + +A condition is composed of an operator and a version. 
The supported operators are: + +- `<1.0.0` Less than `1.0.0` +- `<=1.0.0` Less than or equal to `1.0.0` +- `>1.0.0` Greater than `1.0.0` +- `>=1.0.0` Greater than or equal to `1.0.0` +- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` +- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. + +Note that spaces between the operator and the version will be gracefully tolerated. + +A `Range` can link multiple `Ranges` separated by space: + +Ranges can be linked by logical AND: + + - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` + - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` + +Ranges can also be linked by logical OR: + + - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` + +AND has a higher precedence than OR. It's not possible to use brackets. + +Ranges can be combined by both AND and OR + + - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` + +Range usage: + +``` +v, err := semver.Parse("1.2.3") +range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if range(v) { + //valid +} + +``` + +Example +----- + +Have a look at full examples in [examples/main.go](examples/main.go) + +```go +import github.com/blang/semver + +v, err := semver.Make("0.0.1-alpha.preview+123.github") +fmt.Printf("Major: %d\n", v.Major) +fmt.Printf("Minor: %d\n", v.Minor) +fmt.Printf("Patch: %d\n", v.Patch) +fmt.Printf("Pre: %s\n", v.Pre) +fmt.Printf("Build: %s\n", v.Build) + +// Prerelease versions array +if len(v.Pre) > 0 { + fmt.Println("Prerelease versions:") + for i, pre := range v.Pre { + fmt.Printf("%d: %q\n", i, pre) + } +} + +// Build meta data array +if len(v.Build) > 0 { + fmt.Println("Build meta data:") + for i, build := range v.Build { + fmt.Printf("%d: %q\n", i, build) + } +} + +v001, err := semver.Make("0.0.1") +// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE +v001.GT(v) == true +v.LT(v001) == true +v.GTE(v) == true +v.LTE(v) == true + +// Or use v.Compare(v2) for comparisons (-1, 0, 1): +v001.Compare(v) == 1 +v.Compare(v001) == -1 +v.Compare(v) == 0 + +// Manipulate Version in place: +v.Pre[0], err = semver.NewPRVersion("beta") +if err != nil { + fmt.Printf("Error parsing pre release version: %q", err) +} + +fmt.Println("\nValidate versions:") +v.Build[0] = "?" 
+ +err = v.Validate() +if err != nil { + fmt.Printf("Validation failed: %s\n", err) +} +``` + + +Benchmarks +----- + + BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op + BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op + BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op + BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op + BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op + BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op + BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op + BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op + BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op + BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op + BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op + BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op + BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op + BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op + BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op + BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op + +See benchmark cases at [semver_test.go](semver_test.go) + + +Motivation +----- + +I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. + + +Contribution +----- + +Feel free to make a pull request. For bigger changes create a issue first to discuss about it. + + +License +----- + +See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 000000000..a74bf7c44 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
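
The semver README snippets vendored above are illustrative rather than compilable Go: the import path is unquoted and the range-usage example binds the parsed range to the reserved word `range`. A minimal, self-contained sketch of the same Parse/ParseRange flow, assuming only the APIs present in this vendored copy (Parse, MustParse, Compare, LT, ParseRange):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v, err := semver.Parse("1.2.3")
	if err != nil {
		panic(err)
	}
	pre := semver.MustParse("1.2.3-beta.1")

	// Prerelease versions sort before the corresponding release.
	fmt.Println(pre.LT(v))      // true
	fmt.Println(v.Compare(pre)) // 1

	// ParseRange returns a Range (a func(Version) bool). "range" is a Go
	// keyword, so the parsed range needs a different variable name.
	inRange, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(inRange(v))                         // true
	fmt.Println(inRange(semver.MustParse("2.1.0"))) // false
}
```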
+func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 000000000..1cf8ebdd9 --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 000000000..fca406d47 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Index(ap, "x") != -1 { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 000000000..8ee0842e6 --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,418 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precendence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (vp *Version, err error) { + v, err := Parse(s) + vp = &v + return +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// 
specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions +// with only major and minor components specified +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + s = strings.Join(parts, ".") + } + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 000000000..e18f88082 --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 000000000..eb4d80266 --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 000000000..50d95c548 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE similarity index 95% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE rename to vendor/github.com/cenkalti/backoff/v4/LICENSE index de9c88cb6..89b817996 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2013 Joshua Tacoma +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 000000000..9433004a2 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. 
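
The backoff README's Usage section above gives only the import path. As a minimal sketch of the common pattern (the flaky operation and its limits are made-up stand-ins, not part of the upstream file), Retry can be combined with an ExponentialBackOff built through the functional options added in this release:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	// A hypothetical flaky operation that succeeds on the third try.
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	// NewExponentialBackOff accepts functional options in this version.
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(100*time.Millisecond),
		backoff.WithMaxElapsedTime(5*time.Second),
	)

	if err := backoff.Retry(op, b); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```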
+ +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 000000000..3676ee405 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 000000000..48482330e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 000000000..aac99f196 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,216 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + for _, fn := range opts { + fn(b) + } + b.Reset() + return b +} + +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. 
+func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 000000000..b9c0c51cd --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,146 @@ +package backoff + +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. 
+// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 000000000..df9d68bce --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. 
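(Editorial aside, not part of the vendored files: retry.go above adds the entry points callers actually use, Retry, RetryNotify and Permanent. Here is a minimal sketch of how they compose with the ExponentialBackOff options added earlier in this patch; fetchThing, errNotFound and the chosen intervals are illustrative assumptions.)

```
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

var errNotFound = errors.New("not found")

// fetchThing is a hypothetical operation: a transient error would be
// retried, while wrapping an error in Permanent makes Retry stop at once.
func fetchThing() error {
	return backoff.Permanent(errNotFound)
}

func main() {
	policy := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMaxElapsedTime(30*time.Second),
	)

	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	if err := backoff.RetryNotify(fetchThing, policy, notify); err != nil {
		// Permanent returns the wrapped cause, so errors.Is matches it.
		fmt.Println("gave up, not found:", errors.Is(err, errNotFound))
	}
}
```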
+type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 000000000..8120d0213 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 000000000..28d58ca37 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. 
+*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE new file mode 100644 index 000000000..67edaa90a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/LICENSE @@ -0,0 +1,57 @@ +Copyright (c) 2019 Cloudflare. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Cloudflare nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================================================================== + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go new file mode 100644 index 000000000..f9057c2b8 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go @@ -0,0 +1,96 @@ +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + fp.SetOne(&w[1]) // x1 = 1 + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e, + 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb, + 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad, + 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 255 + const h = 3 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 255 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [5]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve25519 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) point of order 4 on Curve25519 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (x,_,1) first point of order 8 on Curve25519 */ + 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00, + }, + { /* (x,_,1) second point of order 8 on Curve25519 */ + 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57, + }, + { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */ + 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go new file mode 100644 index 000000000..8a3d54c57 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h new file mode 100644 index 000000000..8c1ae4d0f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) 
\ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s new file mode 100644 index 000000000..ce9f06289 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s @@ -0,0 +1,157 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +#include "textflag.h" + +// Depends on circl/math/fp25519 package +#include "../../math/fp25519/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve25519 +#define CTE_A24 121666 + +#define Size 32 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R13, FLAGS +// Instr: x86_64, cmov +#define multiplyA24Leg(z,x) \ + MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \ + MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \ + MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \ + MOVL $CTE_A24, AX; MULQ 24+x; \ + ADDQ R12, R9; \ + ADCQ R13, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVQ $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; 
MOVQ R8, 0+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R12, FLAGS +// Instr: x86_64, cmov, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R10; \ + MULXQ 8+x, R9, R11; ADDQ R10, R9; \ + MULXQ 16+x, R10, AX; ADCQ R11, R10; \ + MULXQ 24+x, R11, R12; ADCQ AX, R11; \ + ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \ + MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ DX, R12; \ + ADDQ R12, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVQ $0, R12; \ + CMOVQCS DX, R12; \ + ADDQ R12, R8; MOVQ R8, 0+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp255.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + + +// func ladderStepAmd64(w *[5]fp255.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(w *[5]fp255.Elt, b uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// w = (mu,x1,z1,x2,z2) are five fp.Elt, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp255.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. 
+TEXT ·doubleAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go new file mode 100644 index 000000000..dae67ea37 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go @@ -0,0 +1,85 @@ +package x25519 + +import ( + "encoding/binary" + "math/bits" + + fp "github.com/cloudflare/circl/math/fp25519" +) + +func doubleGeneric(x, z *fp.Elt) { + t0, t1 := &fp.Elt{}, &fp.Elt{} + fp.AddSub(x, z) + fp.Sqr(x, x) + fp.Sqr(z, z) + fp.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z) + fp.Mul(x, x, z) + fp.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp.Cswap(x1, x2, b) + fp.Cswap(z1, z2, b) + fp.AddSub(x1, z1) + fp.Mul(z1, z1, mu) + fp.AddSub(x1, z1) + fp.Sqr(x1, x1) + fp.Sqr(z1, z1) + fp.Mul(x1, x1, z2) + fp.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp.Elt{} + t1 := &fp.Elt{} + fp.AddSub(x2, z2) + fp.AddSub(x3, z3) + fp.Mul(t0, x2, z3) + fp.Mul(t1, x3, z2) + fp.AddSub(t0, t1) + fp.Cmov(x2, x3, b) + fp.Cmov(z2, z3, b) + fp.Sqr(x3, t0) + fp.Sqr(z3, t1) + fp.Mul(z3, x1, z3) + fp.Sqr(x2, x2) + fp.Sqr(z2, z2) + fp.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z2) + fp.Mul(x2, x2, z2) + fp.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp.Elt) { + const A24 = 121666 + const n = 8 + var xx [4]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + + var c3 uint64 + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, 0, c2) + xx[0], _ = bits.Add64(l0, (-c3)&38, 0) + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go new file mode 100644 index 000000000..07fab97d2 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go new file mode 100644 index 000000000..3ce102d14 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go @@ -0,0 +1,19 @@ +/* +Package x25519 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. 
+ +The Diffie-Hellman function, as described in RFC-7748 [1], works for any +public key. However, if a different protocol requires contributory +behaviour [2,3], then the public keys must be validated against low-order +points [3,4]. To do that, the Shared function performs this validation +internally and returns false when the public key is invalid (i.e., it +is a low-order point). + +References: + - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) + - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) + - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) + - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) +*/ +package x25519 diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go new file mode 100644 index 000000000..c76f72ac7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/key.go @@ -0,0 +1,47 @@ +package x25519 + +import ( + "crypto/subtle" + + fp "github.com/cloudflare/circl/math/fp25519" +) + +// Size is the length in bytes of a X25519 key. +const Size = 32 + +// Key represents a X25519 key. +type Key [Size]byte + +func (k *Key) clamp(in *Key) *Key { + *k = *in + k[0] &= 248 + k[31] = (k[31] & 127) | 64 + return k +} + +// isValidPubKey verifies if the public key is not a low-order point. +func (k *Key) isValidPubKey() bool { + fp.Modp((*fp.Elt)(k)) + var isLowOrder int + for _, P := range lowOrderPoints { + isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) + } + return isLowOrder == 0 +} + +// KeyGen obtains a public key given a secret key. +func KeyGen(public, secret *Key) { + ladderJoye(public.clamp(secret)) +} + +// Shared calculates Alice's shared key from Alice's secret key and Bob's +// public key returning true on success. A failure case happens when the public +// key is a low-order point, thus the shared key is all-zeros and the function +// returns false. +func Shared(shared, secret, public *Key) bool { + validPk := *public + validPk[31] &= (1 << (255 % 8)) - 1 + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go new file mode 100644 index 000000000..28c8c4ac0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/table.go @@ -0,0 +1,268 @@ +package x25519 + +import "github.com/cloudflare/circl/math/fp25519" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (256)*(256/8) = 8192 bytes. 
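(Editorial aside, not part of the vendored files: key.go above is the public X25519 surface, KeyGen and Shared. Before the precomputed generator table below, here is a minimal key-exchange sketch with that API; the freshly generated random secrets are illustrative.)

```
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x25519"
)

func main() {
	var aliceSecret, alicePublic x25519.Key
	var bobSecret, bobPublic x25519.Key

	// Secrets are 32 random bytes; KeyGen and Shared clamp them internally.
	if _, err := rand.Read(aliceSecret[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobSecret[:]); err != nil {
		panic(err)
	}

	x25519.KeyGen(&alicePublic, &aliceSecret)
	x25519.KeyGen(&bobPublic, &bobSecret)

	// Shared reports false when the peer's public key is a low-order point.
	var sharedA, sharedB x25519.Key
	okA := x25519.Shared(&sharedA, &aliceSecret, &bobPublic)
	okB := x25519.Shared(&sharedB, &bobSecret, &alicePublic)

	fmt.Println("valid:", okA && okB, "shared keys equal:", sharedA == sharedB)
}
```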
+var tableGenerator = [256 * fp25519.Size]byte{ + /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, + /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51, + /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b, + /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30, + /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17, + /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c, + /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29, + /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69, + /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a, + /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d, + /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d, + /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75, + /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75, + /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e, + /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f, + /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37, + /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 
0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41, + /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d, + /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20, + /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51, + /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a, + /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61, + /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29, + /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d, + /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31, + /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02, + /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f, + /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23, + /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18, + /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27, + /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60, + /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79, + /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13, + /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 
0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f, + /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74, + /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f, + /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23, + /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29, + /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40, + /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a, + /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26, + /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d, + /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39, + /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48, + /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07, + /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70, + /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62, + /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26, + /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37, + /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32, + /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 
0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d, + /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26, + /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a, + /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c, + /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f, + /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a, + /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25, + /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e, + /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75, + /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77, + /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b, + /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c, + /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61, + /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44, + /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a, + /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69, + /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49, + /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 
0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e, + /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59, + /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44, + /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79, + /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f, + /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e, + /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e, + /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56, + /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04, + /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22, + /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61, + /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29, + /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a, + /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f, + /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68, + /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c, + /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61, + /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 
0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14, + /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f, + /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34, + /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e, + /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01, + /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c, + /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23, + /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a, + /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b, + /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18, + /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c, + /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72, + /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b, + /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40, + /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00, + /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45, + /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e, + /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 
0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70, + /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37, + /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09, + /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62, + /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a, + /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13, + /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f, + /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66, + /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04, + /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47, + /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48, + /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31, + /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56, + /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22, + /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77, + /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a, + /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32, + /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 
0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25, + /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e, + /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67, + /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68, + /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e, + /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d, + /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48, + /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47, + /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a, + /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c, + /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26, + /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c, + /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72, + /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c, + /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06, + /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11, + /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10, + /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 
0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b, + /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b, + /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50, + /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15, + /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c, + /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35, + /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f, + /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61, + /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e, + /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66, + /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d, + /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a, + /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76, + /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02, + /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35, + /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d, + /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f, + /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 
0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52, + /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07, + /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a, + /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77, + /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34, + /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c, + /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d, + /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58, + /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30, + /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28, + /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a, + /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60, + /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74, + /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67, + /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48, + /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e, + /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58, + /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 
0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f, + /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27, + /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b, + /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74, + /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06, + /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63, + /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b, + /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a, + /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a, + /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f, + /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f, + /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29, + /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07, + /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f, + /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d, + /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44, + /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21, + /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 
0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e, + /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49, + /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56, + /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59, + /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c, + /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61, + /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d, + /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09, + /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e, + /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f, + /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c, + /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58, + /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53, + /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72, + /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58, + /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e, + /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d, + /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 
0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c, + /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07, + /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59, + /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e, + /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18, + /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e, + /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51, + /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54, + /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22, + /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43, + /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e, + /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43, + /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e, + /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69, + /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09, + /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42, + /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49, + /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 
0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53, + /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25, + /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51, + /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71, + /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48, + /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07, + /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c, + /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33, + /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27, + /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78, + /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50, + /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53, + /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a, + /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78, + /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48, + /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41, + /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16, + /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 
0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31, + /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05, + /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70, + /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e, + /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60, + /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52, + /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f, + /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25, + /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46, + /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f, + /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b, + /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31, + /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b, + /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d, + /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02, + /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05, + /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79, + /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 
0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b, + /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go new file mode 100644 index 000000000..d59564e4b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve.go @@ -0,0 +1,104 @@ +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + w[1] = fp.Elt{ // x1 = S + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac, + 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23, + 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1, + 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29, + 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda, + 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a, + 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 448 + const h = 2 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 448 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [3]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve448 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) a point of order 4 on the twist of Curve448 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (-1,_,1) point of order 4 on Curve448 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go new file mode 100644 index 000000000..a06226661 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h new file mode 100644 index 000000000..8c1ae4d0f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + 
integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s new file mode 100644 index 000000000..ed33ba3d0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -0,0 +1,194 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +#include "textflag.h" + +// Depends on circl/math/fp448 package +#include "../../math/fp448/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve448 +#define CTE_A24 39082 + +#define Size 56 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, cmov, adx +#define multiplyA24Leg(z,x) \ + MOVQ $CTE_A24, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + 
ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R9; \ + MULXQ 8+x, AX, R10; ADDQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCQ AX, R13; \ + MULXQ 48+x, AX, DX; ADCQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + MOVQ R8, 0+z; \ + MOVQ R9, 8+z; \ + MOVQ R10, 16+z; \ + MOVQ R11, 24+z; \ + MOVQ R12, 32+z; \ + MOVQ R13, 40+z; \ + MOVQ R14, 48+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp448.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + +// func ladderStepAmd64(w *[5]fp448.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// w = {x1,x2,z2,x3,z4} are five fp255.Elt of 56 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(work *[5]fp.Elt, swap uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +// This is Equation 7 at https://eprint.iacr.org/2017/264. 
+TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp448.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·doubleAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go new file mode 100644 index 000000000..b0b65ccf7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go @@ -0,0 +1,100 @@ +package x448 + +import ( + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/math/fp448" +) + +func doubleGeneric(x, z *fp448.Elt) { + t0, t1 := &fp448.Elt{}, &fp448.Elt{} + fp448.AddSub(x, z) + fp448.Sqr(x, x) + fp448.Sqr(z, z) + fp448.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z) + fp448.Mul(x, x, z) + fp448.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp448.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp448.Cswap(x1, x2, b) + fp448.Cswap(z1, z2, b) + fp448.AddSub(x1, z1) + fp448.Mul(z1, z1, mu) + fp448.AddSub(x1, z1) + fp448.Sqr(x1, x1) + fp448.Sqr(z1, z1) + fp448.Mul(x1, x1, z2) + fp448.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp448.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp448.Elt{} + t1 := &fp448.Elt{} + fp448.AddSub(x2, z2) + fp448.AddSub(x3, z3) + fp448.Mul(t0, x2, z3) + fp448.Mul(t1, x3, z2) + fp448.AddSub(t0, t1) + fp448.Cmov(x2, x3, b) + fp448.Cmov(z2, z3, b) + fp448.Sqr(x3, t0) + fp448.Sqr(z3, t1) + fp448.Mul(z3, x1, z3) + fp448.Sqr(x2, x2) + fp448.Sqr(z2, z2) + fp448.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z2) + fp448.Mul(x2, x2, z2) + fp448.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp448.Elt) { + const A24 = 39082 + const n = 8 + var xx [7]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + h4, l4 := bits.Mul64(xx[4], A24) + h5, l5 := bits.Mul64(xx[5], A24) + h6, l6 := bits.Mul64(xx[6], A24) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, c3 := bits.Add64(h3, l4, c2) + l5, c4 := bits.Add64(h4, l5, c3) + l6, c5 := bits.Add64(h5, l6, c4) + l7, _ := bits.Add64(h6, 0, c5) + + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 = 
bits.Add64(l6, 0, c5) + + xx[0], c0 = bits.Add64(l0, l7, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, l7<<32, c2) + xx[4], c4 = bits.Add64(l4, 0, c3) + xx[5], c5 = bits.Add64(l5, 0, c4) + xx[6], _ = bits.Add64(l6, 0, c5) + + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go new file mode 100644 index 000000000..3755b7c83 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go new file mode 100644 index 000000000..c02904fed --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/doc.go @@ -0,0 +1,19 @@ +/* +Package x448 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. + +The Diffie-Hellman function, as described in RFC-7748 [1], works for any +public key. However, if a different protocol requires contributory +behaviour [2,3], then the public keys must be validated against low-order +points [3,4]. To do that, the Shared function performs this validation +internally and returns false when the public key is invalid (i.e., it +is a low-order point). + +References: + - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) + - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) + - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) + - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) +*/ +package x448 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go new file mode 100644 index 000000000..2fdde5116 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/key.go @@ -0,0 +1,46 @@ +package x448 + +import ( + "crypto/subtle" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Size is the length in bytes of a X448 key. +const Size = 56 + +// Key represents a X448 key. +type Key [Size]byte + +func (k *Key) clamp(in *Key) *Key { + *k = *in + k[0] &= 252 + k[55] |= 128 + return k +} + +// isValidPubKey verifies if the public key is not a low-order point. +func (k *Key) isValidPubKey() bool { + fp.Modp((*fp.Elt)(k)) + var isLowOrder int + for _, P := range lowOrderPoints { + isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) + } + return isLowOrder == 0 +} + +// KeyGen obtains a public key given a secret key. +func KeyGen(public, secret *Key) { + ladderJoye(public.clamp(secret)) +} + +// Shared calculates Alice's shared key from Alice's secret key and Bob's +// public key returning true on success. A failure case happens when the public +// key is a low-order point, thus the shared key is all-zeros and the function +// returns false. 
+func Shared(shared, secret, public *Key) bool { + validPk := *public + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go new file mode 100644 index 000000000..eef53c30f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/table.go @@ -0,0 +1,460 @@ +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (448)*(448/8) = 25088 bytes. +var tableGenerator = [448 * fp.Size]byte{ + /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28, + /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8, + /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03, + /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa, + /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9, + /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67, + /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e, + /* (2^ 8)P */ 0x9d, 
0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7, + /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a, + /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d, + /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60, + /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a, + /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae, + /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95, + /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b, + /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34, + /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06, + /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 
0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17, + /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d, + /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc, + /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde, + /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11, + /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98, + /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c, + /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed, + /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9, + /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f, + /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 
0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75, + /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7, + /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c, + /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b, + /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e, + /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28, + /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2, + /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9, + /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08, + /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8, + /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 
0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20, + /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29, + /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e, + /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57, + /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1, + /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d, + /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59, + /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba, + /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee, + /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1, + /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 
0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe, + /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae, + /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb, + /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71, + /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f, + /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41, + /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc, + /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef, + /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5, + /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14, + /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 
0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91, + /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a, + /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4, + /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08, + /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74, + /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9, + /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95, + /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa, + /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25, + /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc, + /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 
0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b, + /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85, + /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51, + /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc, + /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85, + /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5, + /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9, + /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b, + /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82, + /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99, + /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 
0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb, + /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1, + /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0, + /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42, + /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b, + /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb, + /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42, + /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd, + /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57, + /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e, + /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 
0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59, + /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81, + /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54, + /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b, + /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68, + /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9, + /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77, + /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb, + /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3, + /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0, + /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 
0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c, + /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01, + /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7, + /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1, + /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8, + /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda, + /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec, + /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90, + /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29, + /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20, + /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 
0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8, + /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d, + /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d, + /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6, + /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32, + /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47, + /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb, + /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e, + /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7, + /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f, + /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 
0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5, + /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c, + /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94, + /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf, + /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1, + /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f, + /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8, + /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e, + /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24, + /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac, + /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 
0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc, + /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69, + /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7, + /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51, + /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68, + /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8, + /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12, + /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29, + /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9, + /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd, + /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 
0x59, 0xa2, 0x43, + /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9, + /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02, + /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70, + /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32, + /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10, + /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74, + /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f, + /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda, + /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf, + /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0, + /* 
(2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b, + /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe, + /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e, + /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45, + /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9, + /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33, + /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8, + /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed, + /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43, + /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a, + /* (2^159)P */ 0x94, 0xad, 
0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1, + /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a, + /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9, + /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a, + /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29, + /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52, + /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45, + /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3, + /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04, + /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73, + /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 
0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4, + /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd, + /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa, + /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2, + /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed, + /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0, + /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22, + /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69, + /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea, + /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f, + /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 
0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91, + /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf, + /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a, + /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12, + /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54, + /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad, + /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9, + /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2, + /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3, + /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2, + /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 
0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1, + /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b, + /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27, + /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7, + /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d, + /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9, + /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52, + /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a, + /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c, + /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5, + /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 
0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74, + /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92, + /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4, + /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7, + /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1, + /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d, + /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69, + /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe, + /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5, + /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d, + /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 
0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e, + /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6, + /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e, + /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed, + /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51, + /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48, + /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b, + /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94, + /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c, + /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8, + /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 
0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7, + /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79, + /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2, + /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70, + /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82, + /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f, + /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa, + /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c, + /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02, + /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16, + /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 
0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76, + /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3, + /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c, + /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65, + /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f, + /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b, + /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6, + /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda, + /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b, + /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1, + /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 
0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1, + /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee, + /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22, + /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95, + /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f, + /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd, + /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5, + /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f, + /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6, + /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6, + /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 
0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b, + /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80, + /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4, + /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0, + /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95, + /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82, + /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18, + /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82, + /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53, + /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9, + /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 
0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8, + /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82, + /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7, + /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8, + /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3, + /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5, + /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87, + /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56, + /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65, + /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce, + /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 
0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a, + /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1, + /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0, + /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e, + /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45, + /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9, + /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63, + /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36, + /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55, + /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3, + /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 
0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc, + /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9, + /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35, + /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb, + /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33, + /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1, + /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48, + /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08, + /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7, + /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17, + /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 
0x35, 0x16, + /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec, + /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92, + /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c, + /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b, + /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3, + /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8, + /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe, + /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60, + /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61, + /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b, + /* 
(2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2, + /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23, + /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf, + /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96, + /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6, + /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb, + /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2, + /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7, + /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03, + /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57, + /* (2^310)P */ 0x51, 0x45, 
0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89, + /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f, + /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56, + /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27, + /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37, + /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5, + /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc, + /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5, + /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12, + /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28, + /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 
0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2, + /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97, + /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0, + /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14, + /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8, + /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64, + /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0, + /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28, + /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2, + /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9, + /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 
0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7, + /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b, + /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca, + /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86, + /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28, + /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02, + /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c, + /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc, + /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f, + /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00, + /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 
0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c, + /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96, + /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3, + /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75, + /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a, + /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4, + /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04, + /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1, + /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0, + /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08, + /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 
0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f, + /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18, + /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a, + /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38, + /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8, + /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35, + /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf, + /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d, + /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98, + /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e, + /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 
0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed, + /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad, + /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc, + /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1, + /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7, + /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b, + /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48, + /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93, + /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06, + /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4, + /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 
0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5, + /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5, + /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa, + /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9, + /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5, + /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1, + /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24, + /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda, + /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba, + /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7, + /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 
0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a, + /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10, + /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59, + /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d, + /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21, + /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5, + /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0, + /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc, + /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f, + /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0, + /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 
0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9, + /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7, + /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6, + /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f, + /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb, + /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb, + /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93, + /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c, + /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2, + /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a, + /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 
0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f, + /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5, + /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75, + /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f, + /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad, + /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f, + /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7, + /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e, + /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95, + /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0, + /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 
0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35, + /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9, + /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67, + /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd, + /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7, + /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce, + /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd, + /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a, + /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96, + /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24, + /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 
0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46, + /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7, + /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29, + /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87, + /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec, + /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3, + /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4, + /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2, + /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83, + /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67, + /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 
0xac, 0xad, 0x46, 0x8a, 0x26, 0x25, + /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d, + /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91, + /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9, + /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb, + /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87, + /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12, + /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2, + /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32, + /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc, + /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 
0x53, 0x2a, + /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36, + /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2, + /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e, + /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b, + /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd, + /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d, + /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go new file mode 100644 index 000000000..b6b236e5d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go @@ -0,0 +1,71 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var ( + // genX is the x-coordinate of the generator of Goldilocks curve. + genX = fp.Elt{ + 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26, + 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43, + 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12, + 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea, + 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e, + 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22, + 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f, + } + // genY is the y-coordinate of the generator of Goldilocks curve. 
+ genY = fp.Elt{ + 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98, + 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd, + 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a, + 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87, + 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b, + 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88, + 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69, + } + // paramD is -39081 in Fp. + paramD = fp.Elt{ + 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d, + // which is the number of points in the prime subgroup. + order = Scalar{ + 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23, + 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21, + 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4, + 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, + } + // residue448 is 2^448 mod order. + residue448 = [4]uint64{ + 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058, + } + // invFour is 1/4 mod order. + invFour = Scalar{ + 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48, + 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08, + 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71, + 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, + } + // paramDTwist is -39082 in Fp. The D parameter of the twist curve. + paramDTwist = fp.Elt{ + 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go new file mode 100644 index 000000000..5a939100d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go @@ -0,0 +1,80 @@ +// Package goldilocks provides elliptic curve operations over the goldilocks curve. +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2. +type Curve struct{} + +// Identity returns the identity point. +func (Curve) Identity() *Point { + return &Point{ + y: fp.One(), + z: fp.One(), + } +} + +// IsOnCurve returns true if the point lies on the curve. 
+func (Curve) IsOnCurve(P *Point) bool { + x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + rhs, lhs := &fp.Elt{}, &fp.Elt{} + fp.Mul(t, &P.ta, &P.tb) // t = ta*tb + fp.Sqr(x2, &P.x) // x^2 + fp.Sqr(y2, &P.y) // y^2 + fp.Sqr(z2, &P.z) // z^2 + fp.Sqr(t2, t) // t^2 + fp.Add(lhs, x2, y2) // x^2 + y^2 + fp.Mul(rhs, t2, &paramD) // dt^2 + fp.Add(rhs, rhs, z2) // z^2 + dt^2 + fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) + eq0 := fp.IsZero(lhs) + + fp.Mul(lhs, &P.x, &P.y) // xy + fp.Mul(rhs, t, &P.z) // tz + fp.Sub(lhs, lhs, rhs) // xy - tz + eq1 := fp.IsZero(lhs) + return eq0 && eq1 +} + +// Generator returns the generator point. +func (Curve) Generator() *Point { + return &Point{ + x: genX, + y: genY, + z: fp.One(), + ta: genX, + tb: genY, + } +} + +// Order returns the number of points in the prime subgroup. +func (Curve) Order() Scalar { return order } + +// Double returns 2P. +func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R } + +// Add returns P+Q. +func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R } + +// ScalarMult returns kP. This function runs in constant time. +func (e Curve) ScalarMult(k *Scalar, P *Point) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarMult(k4, e.push(P))) +} + +// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time. +func (e Curve) ScalarBaseMult(k *Scalar) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarBaseMult(k4)) +} + +// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time. +func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point { + m4 := &Scalar{} + n4 := &Scalar{} + m4.divBy4(m) + n4.divBy4(n) + return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P))) +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go new file mode 100644 index 000000000..b1daab851 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go @@ -0,0 +1,52 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) } +func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) } + +// push sends a point on the Goldilocks curve to a point on the twist curve. +func (Curve) push(P *Point) *twistPoint { + Q := &twistPoint{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + *d = *a // D = A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H + return Q +} + +// push sends a point on the twist curve to a point on the Goldilocks curve.
+func (twistCurve) push(P *twistPoint) *Point { + Q := &Point{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Neg(d, a) // D = -A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H + return Q +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go new file mode 100644 index 000000000..11f73de05 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go @@ -0,0 +1,171 @@ +package goldilocks + +import ( + "errors" + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Point is a point on the Goldilocks Curve. +type Point struct{ x, y, z, ta, tb fp.Elt } + +func (P Point) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// FromAffine creates a point from affine coordinates. +func FromAffine(x, y *fp.Elt) (*Point, error) { + P := &Point{ + x: *x, + y: *y, + z: fp.One(), + ta: *x, + tb: *y, + } + if !(Curve{}).IsOnCurve(P) { + return P, errors.New("point not on curve") + } + return P, nil +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices are of the +// same length and are interpreted in little-endian order. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// FromBytes returns a point from the input buffer. +func FromBytes(in []byte) (*Point, error) { + if len(in) < fp.Size+1 { + return nil, errors.New("wrong input length") + } + err := errors.New("invalid decoding") + P := &Point{} + signX := in[fp.Size] >> 7 + copy(P.y[:], in[:fp.Size]) + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return nil, err + } + + u, v := &fp.Elt{}, &fp.Elt{} + one := fp.One() + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, &paramD) // v = dy^2 + fp.Sub(u, u, &one) // u = y^2-1 + fp.Sub(v, v, &one) // v = dy^2-1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return nil, err + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return nil, err + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + P.z = fp.One() + return P, nil +} + +// IsIdentity returns true if P is the identity Point. +func (P *Point) IsIdentity() bool { + return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z +} + +// IsEqual returns true if P is equivalent to Q. +func (P *Point) IsEqual(Q *Point) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +// Neg obtains the inverse of the Point. +func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) } + +// ToAffine returns the x,y affine coordinates of P.
+func (P *Point) ToAffine() (x, y fp.Elt) { + fp.Inv(&P.z, &P.z) // 1/z + fp.Mul(&P.x, &P.x, &P.z) // x/z + fp.Mul(&P.y, &P.y, &P.z) // y/z + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y + return P.x, P.y +} + +// ToBytes stores P into a slice of bytes. +func (P *Point) ToBytes(out []byte) error { + if len(out) < fp.Size+1 { + return errors.New("invalid decoding") + } + x, y := P.ToAffine() + out[fp.Size] = (x[0] & 1) << 7 + return fp.ToBytes(out[:fp.Size], &y) +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +func (P *Point) MarshalBinary() (data []byte, err error) { + data = make([]byte, fp.Size+1) + err = P.ToBytes(data[:fp.Size+1]) + return data, err +} + +// UnmarshalBinary must be able to decode the form generated by MarshalBinary. +func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err } + +// Double sets P = 2P. +func (P *Point) Double() { P.Add(P) } + +// Add sets P = P+Q. +func (P *Point) Add(Q *Point) { + // This is formula (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb + x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb + x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb + A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{} + fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1 + fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2 + fp.Mul(A, x1, x2) // A = x1*x2 + fp.Mul(B, y1, y2) // B = y1*y2 + fp.Mul(C, t1, t2) // t1*t2 + fp.Mul(C, C, &paramD) // C = d*t1*t2 + fp.Mul(D, z1, z2) // D = z1*z2 + fp.Add(F, x1, y1) // x1+y1 + fp.Add(E, x2, y2) // x2+y2 + fp.Mul(E, E, F) // (x1+y1)*(x2+y2) + fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A + fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B + fp.Sub(F, D, C) // F = D-C + fp.Add(G, D, C) // G = D+C + fp.Sub(H, B, A) // H = B-A + fp.Mul(z3, F, G) // Z = F * G + fp.Mul(x3, E, F) // X = E * F + fp.Mul(y3, G, H) // Y = G * H, T = E * H +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go new file mode 100644 index 000000000..f98117b25 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go @@ -0,0 +1,203 @@ +package goldilocks + +import ( + "encoding/binary" + "math/bits" +) + +// ScalarSize is the size (in bytes) of scalars. +const ScalarSize = 56 // 448 / 8 + +// _N is the number of 64-bit words to store scalars. +const _N = 7 // 448 / 64 + +// Scalar represents a positive integer stored in little-endian order.
+type Scalar [ScalarSize]byte + +type scalar64 [_N]uint64 + +func (z *scalar64) fromScalar(x *Scalar) { + z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8]) + z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8]) + z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8]) + z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8]) + z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8]) + z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8]) + z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8]) +} + +func (z *scalar64) toScalar(x *Scalar) { + binary.LittleEndian.PutUint64(x[0*8:1*8], z[0]) + binary.LittleEndian.PutUint64(x[1*8:2*8], z[1]) + binary.LittleEndian.PutUint64(x[2*8:3*8], z[2]) + binary.LittleEndian.PutUint64(x[3*8:4*8], z[3]) + binary.LittleEndian.PutUint64(x[4*8:5*8], z[4]) + binary.LittleEndian.PutUint64(x[5*8:6*8], z[5]) + binary.LittleEndian.PutUint64(x[6*8:7*8], z[6]) +} + +// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)). +func add(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Add64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Add64(zz[i], 0, c) + } + return c +} + +// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)). +func sub(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Sub64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Sub64(zz[i], 0, c) + } + return c +} + +// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1. +func mulWord(z, x []uint64, y uint64) { + for i := range z { + z[i] = 0 + } + carry := uint64(0) + for i := range x { + hi, lo := bits.Mul64(x[i], y) + lo, cc := bits.Add64(lo, z[i], 0) + hi, _ = bits.Add64(hi, 0, cc) + z[i], cc = bits.Add64(lo, carry, 0) + carry, _ = bits.Add64(hi, 0, cc) + } + z[len(x)] = carry +} + +// Cmov moves x into z if b=1. +func (z *scalar64) Cmov(b uint64, x *scalar64) { + m := uint64(0) - b + for i := range z { + z[i] = (z[i] &^ m) | (x[i] & m) + } +} + +// leftShift shifts to the left the words of z returning the more significant word. +func (z *scalar64) leftShift(low uint64) uint64 { + high := z[_N-1] + for i := _N - 1; i > 0; i-- { + z[i] = z[i-1] + } + z[0] = low + return high +} + +// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar. +func (z *scalar64) reduceOneWord(x uint64) { + prod := (&scalar64{})[:] + mulWord(prod, residue448[:], x) + cc := add(z[:], z[:], prod) + mulWord(prod, residue448[:], cc) + add(z[:], z[:], prod) +} + +// modOrder reduces z mod order. +func (z *scalar64) modOrder() { + var o64, x scalar64 + o64.fromScalar(&order) + // Performs: while (z >= order) { z = z-order } + // At most 8 (eight) iterations reduce 3 bits by subtracting. + for i := 0; i < 8; i++ { + c := sub(x[:], z[:], o64[:]) // (c || x) = z-order + z.Cmov(1-c, &x) // if c != 0 { z = x } + } +} + +// FromBytes stores z = x mod order, where x is a number stored in little-endian order. +func (z *Scalar) FromBytes(x []byte) { + n := len(x) + nCeil := (n + 7) >> 3 + for i := range z { + z[i] = 0 + } + if nCeil < _N { + copy(z[:], x) + return + } + copy(z[:], x[8*(nCeil-_N):]) + var z64 scalar64 + z64.fromScalar(z) + for i := nCeil - _N - 1; i >= 0; i-- { + low := binary.LittleEndian.Uint64(x[8*i:]) + high := z64.leftShift(low) + z64.reduceOneWord(high) + } + z64.modOrder() + z64.toScalar(z) +} + +// divBy4 calculates z = x/4 mod order. 
+func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) } + +// Red reduces z mod order. +func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) } + +// Neg calculates z = -z mod order. +func (z *Scalar) Neg() { z.Sub(&order, z) } + +// Add calculates z = x+y mod order. +func (z *Scalar) Add(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := add(z64[:], x64[:], y64[:]) + add(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Sub calculates z = x-y mod order. +func (z *Scalar) Sub(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := sub(z64[:], x64[:], y64[:]) + sub(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Mul calculates z = x*y mod order. +func (z *Scalar) Mul(x, y *Scalar) { + var z64, x64, y64 scalar64 + prod := (&[_N + 1]uint64{})[:] + x64.fromScalar(x) + y64.fromScalar(y) + mulWord(prod, x64[:], y64[_N-1]) + copy(z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N]) + for i := _N - 2; i >= 0; i-- { + h := z64.leftShift(0) + z64.reduceOneWord(h) + mulWord(prod, x64[:], y64[i]) + c := add(z64[:], z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N] + c) + } + z64.modOrder() + z64.toScalar(z) +} + +// IsZero returns true if z=0. +func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} } diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go new file mode 100644 index 000000000..83d7cdadd --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -0,0 +1,138 @@ +package goldilocks + +import ( + "crypto/subtle" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp448" +) + +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. +type twistCurve struct{} + +// Identity returns the identity point. +func (twistCurve) Identity() *twistPoint { + return &twistPoint{ + y: fp.One(), + z: fp.One(), + } +} + +// subYDiv16 update x = (x - y) / 16. +func subYDiv16(x *scalar64, y int64) { + s := uint64(y >> 63) + x0, b0 := bits.Sub64((*x)[0], uint64(y), 0) + x1, b1 := bits.Sub64((*x)[1], s, b0) + x2, b2 := bits.Sub64((*x)[2], s, b1) + x3, b3 := bits.Sub64((*x)[3], s, b2) + x4, b4 := bits.Sub64((*x)[4], s, b3) + x5, b5 := bits.Sub64((*x)[5], s, b4) + x6, _ := bits.Sub64((*x)[6], s, b5) + x[0] = (x0 >> 4) | (x1 << 60) + x[1] = (x1 >> 4) | (x2 << 60) + x[2] = (x2 >> 4) | (x3 << 60) + x[3] = (x3 >> 4) | (x4 << 60) + x[4] = (x4 >> 4) | (x5 << 60) + x[5] = (x5 >> 4) | (x6 << 60) + x[6] = (x6 >> 4) +} + +func recodeScalar(d *[113]int8, k *Scalar) { + var k64 scalar64 + k64.fromScalar(k) + for i := 0; i < 112; i++ { + d[i] = int8((k64[0] & 0x1f) - 16) + subYDiv16(&k64, int64(d[i])) + } + d[112] = int8(k64[0]) +} + +// ScalarMult returns kP. 
+func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint { + var TabP [8]preTwistPointProy + var S preTwistPointProy + var d [113]int8 + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + recodeScalar(&d, k) + + P.oddMultiples(TabP[:]) + Q := e.Identity() + for i := 112; i >= 0; i-- { + Q.Double() + Q.Double() + Q.Double() + Q.Double() + mask := d[i] >> 7 + absDi := (d[i] + mask) ^ mask + inx := int32((absDi - 1) >> 1) + sig := int((d[i] >> 7) & 0x1) + for j := range TabP { + S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j)))) + } + S.cneg(sig) + Q.mixAdd(&S) + } + Q.cneg(uint(isEven)) + return Q +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// CombinedMult returns mG+nP. +func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) + } + + var TabQ [1 << (omegaVar - 2)]preTwistPointProy + P.oddMultiples(TabQ[:]) + Q := e.Identity() + for i := len(nafFix) - 1; i >= 0; i-- { + Q.Double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + Q.mixAddZ1(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + Q.mixAdd(&S) + } + } + return Q +} + +// absolute returns always a positive value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go new file mode 100644 index 000000000..c55db77b0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go @@ -0,0 +1,135 @@ +package goldilocks + +import ( + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +type twistPoint struct{ x, y, z, ta, tb fp.Elt } + +type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt } + +type preTwistPointProy struct { + preTwistPointAffine + z2 fp.Elt +} + +func (P *twistPoint) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// cneg conditionally negates the point if b=1. +func (P *twistPoint) cneg(b uint) { + t := &fp.Elt{} + fp.Neg(t, &P.x) + fp.Cmov(&P.x, t, b) + fp.Neg(t, &P.ta) + fp.Cmov(&P.ta, t, b) +} + +// Double updates P with 2P. +func (P *twistPoint) Double() { + // This is formula (7) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. 
(2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q = 1. +func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1) + P.coreAddition(Q) +} + +// coreAddition calculates P=P+Q for curves with A=-1. +func (P *twistPoint) coreAddition(Q *preTwistPointAffine) { + // This is the formula following (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *preTwistPointAffine) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *preTwistPointAffine) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) { + fp.Cmov(&P.addYX, &Q.addYX, b) + fp.Cmov(&P.subYX, &Q.subYX, b) + fp.Cmov(&P.dt2, &Q.dt2, b) +} + +// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q != 1. +func (P *twistPoint) mixAdd(Q *preTwistPointProy) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.preTwistPointAffine) +} + +// oddMultiples calculates T[i] = (2*i-1)P for 0 < i < len(T). +func (P *twistPoint) oddMultiples(T []preTwistPointProy) { + if n := len(T); n > 0 { + T[0].FromTwistPoint(P) + _2P := *P + _2P.Double() + R := &preTwistPointProy{} + R.FromTwistPoint(&_2P) + for i := 1; i < n; i++ { + P.mixAdd(R) + T[i].FromTwistPoint(P) + } + } +} + +// cmov conditionally moves Q into P if b=1. +func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) { + P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b) + fp.Cmov(&P.z2, &Q.z2, b) +} + +// FromTwistPoint precomputes some coordinates of Q for missed addition. 
+func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) { + fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y + fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X + fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb + fp.Mul(&P.dt2, &P.dt2, ¶mDTwist) // D*T + fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T + fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go new file mode 100644 index 000000000..ed432e02c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go @@ -0,0 +1,216 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var tabFixMult = [fxV][fx2w1]preTwistPointAffine{ + { + { + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { + addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab}, + subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70}, + dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c}, + }, + { + addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb}, + subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb, 
0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d}, + dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde}, + }, + { + addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33}, + subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab}, + dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4}, + }, + }, + { + { + addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d}, + subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3}, + dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58}, + }, + { + addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf}, + subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49}, + dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 
0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68}, + }, + { + addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb}, + subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c}, + dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb}, + }, + { + addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66}, + subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd}, + dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff}, + }, + }, +} + +// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where +// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a +// 4-degree isogeny. 
+var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{ + { /* 1P*/ + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { /* 3P*/ + addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5}, + subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4}, + dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d}, + }, + { /* 5P*/ + addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f}, + subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38}, + dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82}, + }, + { /* 7P*/ + addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 
0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82}, + subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9}, + dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11}, + }, + { /* 9P*/ + addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac}, + subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90}, + dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10}, + }, + { /* 11P*/ + addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3}, + subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63}, + dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3}, + }, + { /* 13P*/ + addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b}, + subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 
0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c}, + dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7}, + }, + { /* 15P*/ + addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b}, + subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0}, + dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78}, + }, + { /* 17P*/ + addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97}, + subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6}, + dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5}, + }, + { /* 19P*/ + addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed}, + subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8}, + dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 
0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48}, + }, + { /* 21P*/ + addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02}, + subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62}, + dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f}, + }, + { /* 23P*/ + addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c}, + subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3}, + dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1}, + }, + { /* 25P*/ + addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82}, + subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06}, + dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44}, + }, + { /* 27P*/ + addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 
0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a}, + subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c}, + dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee}, + }, + { /* 29P*/ + addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f}, + subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82}, + dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e}, + }, + { /* 31P*/ + addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1}, + subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae}, + dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81}, + }, + { /* 33P*/ + addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a}, + subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 
0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34}, + dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2}, + }, + { /* 35P*/ + addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a}, + subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a}, + dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e}, + }, + { /* 37P*/ + addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17}, + subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98}, + dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7}, + }, + { /* 39P*/ + addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb}, + subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2}, + dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 
0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec}, + }, + { /* 41P*/ + addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15}, + subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51}, + dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49}, + }, + { /* 43P*/ + addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa}, + subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32}, + dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0}, + }, + { /* 45P*/ + addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89}, + subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4}, + dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1}, + }, + { /* 47P*/ + addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 
0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a}, + subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91}, + dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f}, + }, + { /* 49P*/ + addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09}, + subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89}, + dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b}, + }, + { /* 51P*/ + addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0}, + subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83}, + dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50}, + }, + { /* 53P*/ + addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3}, + subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 
0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63}, + dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18}, + }, + { /* 55P*/ + addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d}, + subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0}, + dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5}, + }, + { /* 57P*/ + addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b}, + subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88}, + dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb}, + }, + { /* 59P*/ + addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33}, + subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb}, + dt2: fp.Elt{0x6a, 0xa2, 0xb1, 
0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10}, + }, + { /* 61P*/ + addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04}, + subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13}, + dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75}, + }, + { /* 63P*/ + addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76}, + subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01}, + dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go new file mode 100644 index 000000000..f6ac5edbb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go @@ -0,0 +1,62 @@ +package goldilocks + +import ( + "crypto/subtle" + + mlsb "github.com/cloudflare/circl/math/mlsbset" +) + +const ( + // MLSBRecoding parameters + fxT = 448 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) +) + +// ScalarBaseMult returns kG where G is the generator point. 
+func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint { + m, err := mlsb.New(fxT, fxV, fxW) + if err != nil { + panic(err) + } + if m.IsExtended() { + panic("not extended") + } + + var isZero int + if k.IsZero() { + isZero = 1 + } + subtle.ConstantTimeCopy(isZero, k[:], order[:]) + + minusK := *k + isEven := 1 - int(k[0]&0x1) + minusK.Neg() + subtle.ConstantTimeCopy(isEven, k[:], minusK[:]) + c, err := m.Encode(k[:]) + if err != nil { + panic(err) + } + + gP := c.Exp(groupMLSB{}) + P := gP.(*twistPoint) + P.cneg(uint(isEven)) + return P +} + +type groupMLSB struct{} + +func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil } +func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() } +func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) } +func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() } +func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} } +func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) { + Tabj := &tabFixMult[v] + P := a.(*preTwistPointAffine) + for k := range Tabj { + P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u))) + } + P.cneg(int(s >> 31)) +} diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go new file mode 100644 index 000000000..649a8e931 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/conv/conv.go @@ -0,0 +1,140 @@ +package conv + +import ( + "encoding/binary" + "fmt" + "math/big" + "strings" +) + +// BytesLe2Hex returns an hexadecimal string of a number stored in a +// little-endian order slice x. +func BytesLe2Hex(x []byte) string { + b := &strings.Builder{} + b.Grow(2*len(x) + 2) + fmt.Fprint(b, "0x") + if len(x) == 0 { + fmt.Fprint(b, "00") + } + for i := len(x) - 1; i >= 0; i-- { + fmt.Fprintf(b, "%02x", x[i]) + } + return b.String() +} + +// BytesLe2BigInt converts a little-endian slice x into a big-endian +// math/big.Int. +func BytesLe2BigInt(x []byte) *big.Int { + n := len(x) + b := new(big.Int) + if len(x) > 0 { + y := make([]byte, n) + for i := 0; i < n; i++ { + y[n-1-i] = x[i] + } + b.SetBytes(y) + } + return b +} + +// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64. +func BytesBe2Uint64Le(x []byte) []uint64 { + l := len(x) + z := make([]uint64, (l+7)/8) + blocks := l / 8 + for i := 0; i < blocks; i++ { + z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):]) + } + remBytes := l % 8 + for i := 0; i < remBytes; i++ { + z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i) + } + return z +} + +// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z. +// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). +// If x does not fit in the slice or is negative, z is not modified. +func BigInt2BytesLe(z []byte, x *big.Int) { + xLen := (x.BitLen() + 7) >> 3 + zLen := len(z) + if zLen >= xLen && x.Sign() >= 0 { + y := x.Bytes() + for i := 0; i < xLen; i++ { + z[i] = y[xLen-1-i] + } + for i := xLen; i < zLen; i++ { + z[i] = 0 + } + } +} + +// Uint64Le2BigInt converts a little-endian slice x into a big number. +func Uint64Le2BigInt(x []uint64) *big.Int { + n := len(x) + b := new(big.Int) + var bi big.Int + for i := n - 1; i >= 0; i-- { + bi.SetUint64(x[i]) + b.Lsh(b, 64) + b.Add(b, &bi) + } + return b +} + +// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes. 
+func Uint64Le2BytesLe(x []uint64) []byte { + b := make([]byte, 8*len(x)) + n := len(x) + for i := 0; i < n; i++ { + binary.LittleEndian.PutUint64(b[i*8:], x[i]) + } + return b +} + +// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes. +func Uint64Le2BytesBe(x []uint64) []byte { + b := make([]byte, 8*len(x)) + n := len(x) + for i := 0; i < n; i++ { + binary.BigEndian.PutUint64(b[i*8:], x[n-1-i]) + } + return b +} + +// Uint64Le2Hex returns an hexadecimal string of a number stored in a +// little-endian order slice x. +func Uint64Le2Hex(x []uint64) string { + b := new(strings.Builder) + b.Grow(16*len(x) + 2) + fmt.Fprint(b, "0x") + if len(x) == 0 { + fmt.Fprint(b, "00") + } + for i := len(x) - 1; i >= 0; i-- { + fmt.Fprintf(b, "%016x", x[i]) + } + return b.String() +} + +// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z. +// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros). +// If x does not fit in the slice or is negative, z is not modified. +func BigInt2Uint64Le(z []uint64, x *big.Int) { + xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words + zLen := len(z) + if zLen >= xLen && x.Sign() > 0 { + var y, yi big.Int + y.Set(x) + two64 := big.NewInt(1) + two64.Lsh(two64, 64).Sub(two64, big.NewInt(1)) + for i := 0; i < xLen; i++ { + yi.And(&y, two64) + z[i] = yi.Uint64() + y.Rsh(&y, 64) + } + } + for i := xLen; i < zLen; i++ { + z[i] = 0 + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go new file mode 100644 index 000000000..7e0230907 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. 
To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go new file mode 100644 index 000000000..7d2365a76 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() State { + return State{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() State { + return State{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() State { + return State{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() State { + return State{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. 
+func Sum384(data []byte) (digest [48]byte) { + h := New384() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go new file mode 100644 index 000000000..1755fd1e6 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// KeccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +// If turbo is true, applies the 12-round variant instead of the +// regular 24-round variant. +// nolint:funlen +func KeccakF1600(a *[25]uint64, turbo bool) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + i := 0 + + if turbo { + i = 12 + } + + for ; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 
2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] 
= bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go new file mode 100644 index 000000000..6a3df42f3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go @@ -0,0 +1,29 @@ +package sha3 + +// RC stores the round constants for use in the ι step. 
+var RC = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go new file mode 100644 index 000000000..a0df5aa6c --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +func (d *State) buf() []byte { + return d.storage.asBytes()[d.bufo:d.bufe] +} + +type State struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + bufo int // offset of buffer in storage + bufe int // end of buffer in storage + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing + turbo bool // Whether we're using 12 rounds instead of 24 +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *State) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *State) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *State) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.bufo = 0 + d.bufe = 0 +} + +func (d *State) clone() *State { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. 
It handles +// any input-output buffering. +func (d *State) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf()) + d.bufe = 0 + d.bufo = 0 + KeccakF1600(&d.a, d.turbo) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + KeccakF1600(&d.a, d.turbo) + d.bufe = d.rate + d.bufo = 0 + copyOut(d, d.buf()) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *State) padAndPermute(dsbyte byte) { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf() because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + zerosStart := d.bufe + 1 + d.bufe = d.rate + buf := d.buf() + buf[zerosStart-1] = dsbyte + for i := zerosStart; i < d.rate; i++ { + buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.bufe = d.rate + copyOut(d, buf) +} + +// Write absorbs more data into the hash's state. It produces an error +// if more data is written to the ShakeHash after writing +func (d *State) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + written = len(p) + + for len(p) > 0 { + bufl := d.bufe - d.bufo + if bufl == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + KeccakF1600(&d.a, d.turbo) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - bufl + if todo > len(p) { + todo = len(p) + } + d.bufe += todo + buf := d.buf() + copy(buf[bufl:], p[:todo]) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if d.bufe == d.rate { + d.permute() + } + } + } + + return written, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *State) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + buf := d.buf() + n := copy(out, buf) + d.bufo += n + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if d.bufo == d.bufe { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. +func (d *State) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen) + _, _ = dup.Read(hash) + return append(in, hash...) 
+} + +func (d *State) IsAbsorbing() bool { + return d.state == spongeAbsorbing +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s new file mode 100644 index 000000000..8a4458f63 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!appengine + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go new file mode 100644 index 000000000..77817f758 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + rate128 = 168 + rate256 = 136 +) + +// Clone returns copy of SHAKE context within its current state. +func (d *State) Clone() ShakeHash { + return d.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() State { + return State{rate: rate128, dsbyte: dsbyteShake} +} + +// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash. 
+// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake128(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate128, dsbyte: D, turbo: true} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() State { + return State{rate: rate256, dsbyte: dsbyteShake} +} + +// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake256(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate256, dsbyte: D, turbo: true} +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum128 writes an arbitrary-length digest of data into hash. +func TurboShakeSum128(hash, data []byte, D byte) { + h := NewTurboShake128(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum256 writes an arbitrary-length digest of data into hash. +func TurboShakeSum256(hash, data []byte, D byte) { + h := NewTurboShake256(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +func (d *State) SwitchDS(D byte) { + d.dsbyte = D +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go new file mode 100644 index 000000000..1e2133745 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || appengine +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go new file mode 100644 index 000000000..2b0c66179 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine) +// +build !amd64 appengine +// +build !386 appengine +// +build !ppc64le appengine + +package sha3 + +import "encoding/binary" + +// xorIn xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. 
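For orientation, the shake.go API vendored above boils down to absorb-then-squeeze: Write feeds input into the sponge, Read squeezes output of any length, and the one-shot helpers (ShakeSum128/ShakeSum256) wrap both steps. The following minimal sketch exercises that API; it is an editorial illustration, not part of the vendored files, and it assumes compilation from inside the circl module, since internal/sha3 cannot be imported by outside packages.

```
package main

import (
	"encoding/hex"
	"fmt"

	// Internal package: only importable from within the circl module itself.
	"github.com/cloudflare/circl/internal/sha3"
)

func main() {
	msg := []byte("The quick brown fox")

	// One-shot helper: absorb msg, squeeze 64 bytes of SHAKE256 output.
	oneShot := make([]byte, 64)
	sha3.ShakeSum256(oneShot, msg)

	// Incremental API: Write absorbs, Read squeezes; output may be any length.
	s := sha3.NewShake256()
	_, _ = s.Write(msg)
	streamed := make([]byte, 64)
	_, _ = s.Read(streamed)

	fmt.Println("one-shot :", hex.EncodeToString(oneShot))
	fmt.Println("streamed :", hex.EncodeToString(streamed))
	fmt.Println("equal    :", hex.EncodeToString(oneShot) == hex.EncodeToString(streamed))
}
```

The first Read triggers padAndPermute with dsbyte = 0x1f, which is the SHAKE "1111" domain-separation bits plus the first padding bit described in the State comment above.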
+func xorIn(d *State, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOut copies ulint64s to a byte buffer. +func copyOut(d *State, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go new file mode 100644 index 000000000..052fc8d32 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || 386 || ppc64le) && !appengine +// +build amd64 386 ppc64le +// +build !appengine + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorInuses unaligned reads and writes to update d.a to contain d.a +// XOR buf. +func xorIn(d *State, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOut(d *State, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go new file mode 100644 index 000000000..57a50ff5e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go @@ -0,0 +1,205 @@ +// Package fp25519 provides prime field arithmetic over GF(2^255-19). +package fp25519 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 32 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^255-19. +var p = Elt{ + 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, +} + +// P returns the prime modulus 2^255-19. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 } + +// Neg calculates z = -x. +func Neg(z, x *Elt) { Sub(z, &p, x) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is +// indicated by returning isQR = true. 
Otherwise, when x/y is a quadratic +// non-residue, z will have an undetermined value and isQR = false. +func InvSqrt(z, x, y *Elt) (isQR bool) { + sqrtMinusOne := &Elt{ + 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4, + 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f, + 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b, + 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b, + } + t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{} + + Mul(t0, x, y) // t0 = u*v + Sqr(t1, y) // t1 = v^2 + Mul(t2, t0, t1) // t2 = u*v^3 + Sqr(t0, t1) // t0 = v^4 + Mul(t1, t0, t2) // t1 = u*v^7 + + var Tab [4]*Elt + Tab[0] = &Elt{} + Tab[1] = &Elt{} + Tab[2] = t3 + Tab[3] = t1 + + *Tab[0] = *t1 + Sqr(Tab[0], Tab[0]) + Sqr(Tab[1], Tab[0]) + Sqr(Tab[1], Tab[1]) + Mul(Tab[1], Tab[1], Tab[3]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[0], Tab[0]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[1], Tab[0]) + for i := 0; i < 4; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 4; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[0]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 14; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 29; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 59; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + for i := 0; i < 5; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 124; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[2], Tab[2]) + Sqr(Tab[2], Tab[2]) + Mul(Tab[2], Tab[2], Tab[3]) + + Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8 + // Checking whether y z^2 == x + Sqr(t0, z) // t0 = z^2 + Mul(t0, t0, y) // t0 = yz^2 + Sub(t1, t0, x) // t1 = t0-u + Add(t2, t0, x) // t2 = t0+u + if IsZero(t1) { + return true + } else if IsZero(t2) { + Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1) + return true + } else { + return false + } +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + x0, x1, x2 := &Elt{}, &Elt{}, &Elt{} + Sqr(x1, x) + Sqr(x0, x1) + Sqr(x0, x0) + Mul(x0, x0, x) + Mul(z, x0, x1) + Sqr(x1, z) + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 4; i++ { + Sqr(x1, x1) + } + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 9; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + Sqr(x2, x1) + for i := 0; i < 19; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x1) + for i := 0; i < 10; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x0) + Sqr(x0, x2) + for i := 0; i < 49; i++ { + Sqr(x0, x0) + } + Mul(x0, x0, x2) + Sqr(x1, x0) + for i := 0; i < 99; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + for i := 0; i < 50; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x2) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } + +// Modp ensures that z is between [0,p-1]. 
+func Modp(z *Elt) { modp(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go new file mode 100644 index 000000000..057f0d280 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go @@ -0,0 +1,45 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp25519 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } +func modp(z *Elt) { modpAmd64(z) } + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) + +//go:noescape +func modpAmd64(z *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h new file mode 100644 index 000000000..b884b584a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h @@ -0,0 +1,351 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. +// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. 
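The amd64 backends above select a code path at run time: hasBmi2Adx gates the MULX/ADCX/ADOX routines, and the CHECK_BMI2ADX assembly macro falls back to the legacy sequences otherwise. The Go-level shape of that dispatch looks roughly like the sketch below; mulGo and mulBmi2Adx are hypothetical stand-ins for the two assembly paths, not functions from the vendored files.

```
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// Same feature gate the vendored fp25519/fp448 amd64 backends use.
var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX

// mulGo and mulBmi2Adx are placeholders for the "legacy" and MULX/ADCX/ADOX
// multiplication routines; here they just do a plain multiply.
func mulGo(x, y uint64) uint64      { return x * y }
func mulBmi2Adx(x, y uint64) uint64 { return x * y }

func mul(x, y uint64) uint64 {
	if hasBmi2Adx {
		return mulBmi2Adx(x, y) // fast path: BMI2 and ADX available
	}
	return mulGo(x, y) // portable fallback
}

func main() {
	fmt.Println("BMI2+ADX:", hasBmi2Adx, " 6*7 =", mul(6, 7))
}
```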
+// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define additionLeg(z,x,y) \ + MOVL $38, AX; \ + MOVL $0, DX; \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ R8, 0+z; + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov, adx +#define additionAdx(z,x,y) \ + MOVL $38, AX; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + CMOVQCS AX, DX ; \ + XORL AX, AX; \ + ADCXQ DX, R8; \ + ADCXQ AX, R9; MOVQ R9, 8+z; \ + ADCXQ AX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + MOVL $38, DX; \ + CMOVQCS DX, AX; \ + ADDQ AX, R8; MOVQ R8, 0+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define subtraction(z,x,y) \ + MOVL $38, AX; \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; MOVQ R8, 0+z; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \ + MOVQ 8+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \ + MOVQ 16+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \ + MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \ + MOVQ 24+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define 
integerMulLeg(z,x,y) \ + MOVQ 0+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R13, R15; \ + ADCQ R14, R10; MOVQ R10, 16+z; \ + ADCQ AX, R11; MOVQ R11, 24+z; \ + ADCQ $0, DX; MOVQ DX, 32+z; \ + MOVQ 8+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 8+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 16+z, R9; MOVQ R9, R15; \ + ADCQ 24+z, R10; MOVQ R10, 24+z; \ + ADCQ 32+z, R11; MOVQ R11, 32+z; \ + ADCQ $0, DX; MOVQ DX, 40+z; \ + MOVQ 16+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 16+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 24+z, R9; MOVQ R9, R15; \ + ADCQ 32+z, R10; MOVQ R10, 32+z; \ + ADCQ 40+z, R11; MOVQ R11, 40+z; \ + ADCQ $0, DX; MOVQ DX, 48+z; \ + MOVQ 24+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 24+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 32+z, R9; MOVQ R9, 32+z; \ + ADCQ 40+z, R10; MOVQ R10, 40+z; \ + ADCQ 48+z, R11; MOVQ R11, 48+z; \ + ADCQ $0, DX; MOVQ DX, 56+z; + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + MOVQ 0+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \ + MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \ + MOVQ 24+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \ + \ + ADDQ R14, R10;\ + ADCQ R15, R11; MOVL $0, R15;\ + ADCQ CX, R12;\ + ADCQ AX, R13;\ + ADCQ $0, DX; MOVQ DX, R14;\ + MOVQ 8+x, AX; MULQ 16+x;\ + \ + ADDQ AX, R11;\ + ADCQ DX, R12;\ + ADCQ $0, R13;\ + ADCQ $0, R14;\ + ADCQ $0, R15;\ + \ + SHLQ $1, R14, R15; MOVQ R15, 56+z;\ + SHLQ $1, R13, R14; MOVQ R14, 48+z;\ + SHLQ $1, R12, R13; MOVQ R13, 40+z;\ + SHLQ $1, R11, R12; MOVQ R12, 32+z;\ + SHLQ $1, R10, R11; MOVQ R11, 24+z;\ + SHLQ $1, R9, R10; MOVQ R10, 16+z;\ + SHLQ $1, R9; MOVQ R9, 8+z;\ + \ + MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\ + MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\ + MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\ + MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\ + \ + ADDQ 8+z, R9; MOVQ R9, 8+z;\ + ADCQ 16+z, R10; MOVQ R10, 16+z;\ + ADCQ 24+z, R11; MOVQ R11, 24+z;\ + ADCQ 32+z, R12; MOVQ R12, 32+z;\ + ADCQ 40+z, R13; MOVQ R13, 40+z;\ + ADCQ 48+z, R14; MOVQ R14, 48+z;\ + ADCQ 56+z, R15; MOVQ R15, 56+z; + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + MOVQ 0+x, DX; /* A[0] */ \ + MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \ + MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \ + MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \ + MOVQ 24+x, DX; /* A[3] */ \ + MULXQ 8+x, 
R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \ + MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \ + MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \ + MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \ + XORL R15, R15; \ + ADOXQ AX, R10; ADCXQ R8, R8; \ + ADOXQ CX, R11; ADCXQ R9, R9; \ + ADOXQ R15, R12; ADCXQ R10, R10; \ + ADOXQ R15, R13; ADCXQ R11, R11; \ + ADOXQ R15, R14; ADCXQ R12, R12; \ + ;;;;;;;;;;;;;;; ADCXQ R13, R13; \ + ;;;;;;;;;;;;;;; ADCXQ R14, R14; \ + MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \ + ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \ + ADDQ CX, R8; MOVQ R8, 8+z; \ + MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \ + ADCQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R10; MOVQ R10, 24+z; \ + MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \ + ADCQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R12; MOVQ R12, 40+z; \ + MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \ + ADCQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R14; MOVQ R14, 56+z; + +// reduceFromDouble finds z congruent to x modulo p such that 0> 63) + // PUT BIT 255 IN CARRY FLAG AND CLEAR + x3 &^= 1 << 63 + + x0, c0 := bits.Add64(x0, cx, 0) + x1, c1 := bits.Add64(x1, 0, c0) + x2, c2 := bits.Add64(x2, 0, c1) + x3, _ = bits.Add64(x3, 0, c2) + + // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19 + // cx = C[255] ? 0 : 19 + cx = uint64(19) &^ (-(x3 >> 63)) + // CLEAR BIT 255 + x3 &^= 1 << 63 + + x0, c0 = bits.Sub64(x0, cx, 0) + x1, c1 = bits.Sub64(x1, 0, c0) + x2, c2 = bits.Sub64(x2, 0, c1) + x3, _ = bits.Sub64(x3, 0, c2) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) +} + +func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) { + h0, l0 := bits.Mul64(x4, 38) + h1, l1 := bits.Mul64(x5, 38) + h2, l2 := bits.Mul64(x6, 38) + h3, l3 := bits.Mul64(x7, 38) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + l0, c0 = bits.Add64(l0, x0, 0) + l1, c1 = bits.Add64(l1, x1, c0) + l2, c2 = bits.Add64(l2, x2, c1) + l3, c3 := bits.Add64(l3, x3, c2) + l4, _ = bits.Add64(l4, 0, c3) + + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + z1, c1 := bits.Add64(l1, 0, c0) + z2, c2 := bits.Add64(l2, 0, c1) + z3, c3 := bits.Add64(l3, 0, c2) + z0, _ := bits.Add64(l0, (-c3)&38, 0) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go new file mode 100644 index 000000000..26ca4d01b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go @@ -0,0 +1,13 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp25519 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } +func modp(z *Elt) { modpGeneric(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go new file mode 100644 index 000000000..a5e36600b 
--- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp.go @@ -0,0 +1,164 @@ +// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1). +package fp448 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 56 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^448-2^224-1. +var p = Elt{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +} + +// P returns the prime modulus 2^448-2^224-1. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// IsOne returns true if x is equal to 1. +func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{1} } + +// One returns the 1 element. +func One() (x Elt) { x = Elt{1}; return } + +// Neg calculates z = -x. +func Neg(z, x *Elt) { Sub(z, &p, x) } + +// Modp ensures that z is between [0,p-1]. +func Modp(z *Elt) { Sub(z, z, &p) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so, +// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue, +// and z = sqrt(-x/y). +func InvSqrt(z, x, y *Elt) (isQR bool) { + // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x + // so that's x if x is a quadratic residue and -x otherwise. + // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y). + // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y). + // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y). + t0, t1 := &Elt{}, &Elt{} + Mul(t0, x, y) // x*y + Sqr(t1, y) // y^2 + Mul(t1, t0, t1) // x*y^3 + powPminus3div4(z, t1) // (x*y^3)^k + Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1) + + // Check if x/y is a quadratic residue + Sqr(t0, z) // z^2 + Mul(t0, t0, y) // y*z^2 + Sub(t0, t0, x) // y*z^2-x + return IsZero(t0) +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4. + t := &Elt{} + powPminus3div4(t, x) // t = x^k + Sqr(t, t) // t = x^2k + Sqr(t, t) // t = x^4k + Mul(z, t, x) // z = x^(4k+1) +} + +// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4. 
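The Inv and InvSqrt comments above lean on two identities for the Goldilocks prime p = 2^448 - 2^224 - 1: with k = (p-3)/4, we have 4k+1 = p-2 (so x^(4k+1) is the Fermat inverse), and p mod 4 = 3 (so k is an exact integer). A quick math/big sanity check of those identities, included purely as a worked illustration:

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)

	// p = 2^448 - 2^224 - 1 (the fp448 modulus).
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one)

	// k = (p-3)/4, the exponent computed by powPminus3div4.
	k := new(big.Int).Sub(p, big.NewInt(3))
	k.Div(k, big.NewInt(4))

	// Inv uses x^(4k+1) = x^(p-2) = 1/x (Fermat's little theorem).
	fourKPlus1 := new(big.Int).Mul(k, big.NewInt(4))
	fourKPlus1.Add(fourKPlus1, one)
	pMinus2 := new(big.Int).Sub(p, big.NewInt(2))
	fmt.Println("4k+1 == p-2:", fourKPlus1.Cmp(pMinus2) == 0)

	// InvSqrt needs p mod 4 = 3 so that (p-3)/4 divides evenly.
	fmt.Println("p mod 4    :", new(big.Int).Mod(p, big.NewInt(4)))
}
```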
+func powPminus3div4(z, x *Elt) { + x0, x1 := &Elt{}, &Elt{} + Sqr(z, x) + Mul(z, z, x) + Sqr(x0, z) + Mul(x0, x0, x) + Sqr(z, x0) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 11; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 26; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 53; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 110; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + Mul(z, z, x) + for i := 0; i < 223; i++ { + Sqr(z, z) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go new file mode 100644 index 000000000..6a12209a7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go @@ -0,0 +1,43 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp448 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } + +/* Functions defined in fp_amd64.s */ + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h new file mode 100644 index 000000000..536fe5bdf --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h @@ -0,0 +1,591 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. 
+// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \ + MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \ + MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \ + MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. +// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \ + MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \ + MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \ + MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define additionLeg(z,x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ R8, 0+z; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ DX, R11; MOVQ R11, 24+z; \ + ADCQ $0, R12; MOVQ R12, 32+z; \ + ADCQ $0, R13; MOVQ R13, 40+z; \ + ADCQ $0, R14; MOVQ R14, 48+z; + + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, adx +#define additionAdx(z,x,y) \ + MOVL $32, R15; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + MOVQ 32+x, R12; ADCXQ 32+y, R12; \ + MOVQ 40+x, R13; ADCXQ 40+y, R13; \ + MOVQ 48+x, R14; ADCXQ 48+y, R14; \ + ;;;;;;;;;;;;;;; ADCXQ DX, DX; \ + XORL AX, AX; \ + ADCXQ DX, R8; SHLXQ R15, DX, DX; \ + ADCXQ AX, R9; \ + ADCXQ AX, R10; \ + ADCXQ DX, R11; \ + ADCXQ AX, R12; \ + ADCXQ AX, R13; \ + ADCXQ AX, R14; \ + ADCXQ AX, AX; \ + XORL DX, DX; \ + ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \ + ADCXQ DX, R9; MOVQ R9, 8+z; \ + ADCXQ DX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + ADCXQ DX, R12; MOVQ R12, 32+z; \ + ADCXQ DX, R13; MOVQ R13, 40+z; \ + ADCXQ DX, R14; MOVQ R14, 48+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define subtraction(z,x,y) \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVQ 
32+x, R12; SBBQ 32+y, R12; \ + MOVQ 40+x, R13; SBBQ 40+y, R13; \ + MOVQ 48+x, R14; SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+z; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ DX, R11; MOVQ R11, 24+z; \ + SBBQ $0, R12; MOVQ R12, 32+z; \ + SBBQ $0, R13; MOVQ R13, 40+z; \ + SBBQ $0, R14; MOVQ R14, 48+z; + +// maddBmi2Adx multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64, bmi2, adx +#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \ + MOVQ i+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \ + MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \ + MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \ + MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \ + MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \ + MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \ + MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \ + ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \ + MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCXQ AX, R13; \ + MULXQ 48+x, AX, R15; ADCXQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \ + maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \ + maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \ + maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \ + maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \ + maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \ + maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \ + MOVQ R15, 56+z; \ + MOVQ R9, 64+z; \ + MOVQ R10, 72+z; \ + MOVQ R11, 80+z; \ + MOVQ R12, 88+z; \ + MOVQ R13, 96+z; \ + MOVQ R14, 104+z; + +// maddLegacy multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64 +#define maddLegacy(z,x,y,i) \ + MOVQ i+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \ + ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \ + ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \ + ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \ + ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \ + ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \ + ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \ + ADCQ $0, DX; MOVQ DX, 56+i+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerMulLeg(z,x,y) \ + MOVQ 0+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ 
$0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \ + maddLegacy(z,x,y, 8) \ + maddLegacy(z,x,y,16) \ + maddLegacy(z,x,y,24) \ + maddLegacy(z,x,y,32) \ + maddLegacy(z,x,y,40) \ + maddLegacy(z,x,y,48) + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, CX; \ + MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \ + ADDQ CX, CX; ADCQ $0, R15; \ + MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + \ + MOVQ 8+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 8+x, CX; ADCQ $0, R15; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \ + \ + MOVQ 16+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 16+x, CX; ADCQ $0, R15; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \ + \ + MOVQ 24+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ADDQ 24+x, CX; ADCQ $0, R15; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \ + \ + MOVQ 32+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 32+x, CX; ADCQ $0, R15; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, 
DX; MOVQ DX,R12; \ + \ + XORL R13, R13; \ + XORL R14, R14; \ + MOVQ 40+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 40+x, CX; ADCQ $0, R15; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \ + \ + XORL R9, R9; \ + MOVQ 48+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z; + + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, DX; \ + ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \ + ADDQ DX, DX; ADCQ $0, R15; CLC; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 32+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 40+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 48+x, AX, R14; ADCXQ AX, R13; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \ + \ + MOVQ 8+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \ + ADDQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 8+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \ + \ + MOVQ 16+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \ + ADDQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 16+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \ + \ + MOVQ 24+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \ + ADDQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R8; \ + ADCQ $0, R9; \ + ADDQ 24+x, DX; \ + ADCQ $0, R15; \ + XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \ + \ + MOVQ 32+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \ + ADDQ AX, R9; MOVQ R9, 64+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 32+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ 
R10, 72+z; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \ + \ + MOVQ 40+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \ + ADDQ AX, R11; MOVQ R11, 80+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 40+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \ + \ + MOVQ 48+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \ + XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \ + ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \ + ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z; + +// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64 +#define reduceFromDoubleLeg(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \ + ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCQ $0,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + ADDQ 0+z,R10; \ + ADCQ 8+z,R11; \ + ADCQ 16+z,R12; \ + ADCQ 24+z,R13; \ + ADCQ 32+z,R15; \ + ADCQ 40+z, R8; \ + ADCQ 48+z, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + ADDQ R14,R10; MOVQ $0,R14; \ + ADCQ $0,R11; \ + ADCQ $0,R12; \ + ADCQ AX,R13; \ + ADCQ $0,R15; \ + ADCQ $0, R8; \ + ADCQ $0, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32,AX; \ + ADDQ R14,R10; MOVQ R10, 0+z; \ + ADCQ $0,R11; MOVQ R11, 8+z; \ + ADCQ $0,R12; MOVQ R12,16+z; \ + ADCQ AX,R13; MOVQ R13,24+z; \ + ADCQ $0,R15; MOVQ R15,32+z; \ + ADCQ $0, R8; MOVQ R8,40+z; \ + ADCQ $0, R9; MOVQ R9,48+z; + +// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64, adx +#define reduceFromDoubleAdx(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + XORL AX,AX; \ + ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCXQ 16+x, R9; MOVQ R9,16+z; 
MOVQ 72+x, R9; \ + ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCXQ AX,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + XORL AX,AX; \ + ADCXQ 0+z,R10; \ + ADCXQ 8+z,R11; \ + ADCXQ 16+z,R12; \ + ADCXQ 24+z,R13; \ + ADCXQ 32+z,R15; \ + ADCXQ 40+z, R8; \ + ADCXQ 48+z, R9; \ + ADCXQ AX,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ $0,R14; \ + ADCXQ R14,R11; \ + ADCXQ R14,R12; \ + ADCXQ AX,R13; \ + ADCXQ R14,R15; \ + ADCXQ R14, R8; \ + ADCXQ R14, R9; \ + ADCXQ R14,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \ + ADCXQ R14,R11; MOVQ R11, 8+z; \ + ADCXQ R14,R12; MOVQ R12,16+z; \ + ADCXQ AX,R13; MOVQ R13,24+z; \ + ADCXQ R14,R15; MOVQ R15,32+z; \ + ADCXQ R14, R8; MOVQ R8,40+z; \ + ADCXQ R14, R9; MOVQ R9,48+z; + +// addSub calculates two operations: x,y = x+y,x-y +// Uses: AX, DX, R8-R15, FLAGS +#define addSub(x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \ + ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \ + ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \ + ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \ + ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \ + ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \ + ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \ + SUBQ 0+y, R8; \ + SBBQ 8+y, R9; \ + SBBQ 16+y, R10; \ + SBBQ 24+y, R11; \ + SBBQ 32+y, R12; \ + SBBQ 40+y, R13; \ + SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+y; \ + SBBQ $0, R9; MOVQ R9, 8+y; \ + SBBQ $0, R10; MOVQ R10, 16+y; \ + SBBQ DX, R11; MOVQ R11, 24+y; \ + SBBQ $0, R12; MOVQ R12, 32+y; \ + SBBQ $0, R13; MOVQ R13, 40+y; \ + SBBQ $0, R14; MOVQ R14, 48+y; diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s new file mode 100644 index 000000000..3f1f07c98 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -0,0 +1,75 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +#include "textflag.h" +#include "fp_amd64.h" + +// func cmovAmd64(x, y *Elt, n uint) +TEXT ·cmovAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cselect(0(DI),0(SI),BX) + RET + +// func cswapAmd64(x, y *Elt, n uint) +TEXT ·cswapAmd64(SB),NOSPLIT,$0-24 + MOVQ 
x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cswap(0(DI),0(SI),BX) + RET + +// func subAmd64(z, x, y *Elt) +TEXT ·subAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + subtraction(0(DI),0(SI),0(BX)) + RET + +// func addsubAmd64(x, y *Elt) +TEXT ·addsubAmd64(SB),NOSPLIT,$0-16 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + addSub(0(DI),0(SI)) + RET + +#define addLegacy \ + additionLeg(0(DI),0(SI),0(BX)) +#define addBmi2Adx \ + additionAdx(0(DI),0(SI),0(BX)) + +#define mulLegacy \ + integerMulLeg(0(SP),0(SI),0(BX)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define mulBmi2Adx \ + integerMulAdx(0(SP),0(SI),0(BX)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +#define sqrLegacy \ + integerSqrLeg(0(SP),0(SI)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define sqrBmi2Adx \ + integerSqrAdx(0(SP),0(SI)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +// func addAmd64(z, x, y *Elt) +TEXT ·addAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx) + +// func mulAmd64(z, x, y *Elt) +TEXT ·mulAmd64(SB),NOSPLIT,$112-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx) + +// func sqrAmd64(z, x *Elt) +TEXT ·sqrAmd64(SB),NOSPLIT,$112-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go new file mode 100644 index 000000000..47a0b6320 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go @@ -0,0 +1,339 @@ +package fp448 + +import ( + "encoding/binary" + "math/bits" +) + +func cmovGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + x0 = (x0 &^ m) | (y0 & m) + x1 = (x1 &^ m) | (y1 & m) + x2 = (x2 &^ m) | (y2 & m) + x3 = (x3 &^ m) | (y3 & m) + x4 = (x4 &^ m) | (y4 & m) + x5 = (x5 &^ m) | (y5 & m) + x6 = (x6 &^ m) | (y6 & m) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) +} + +func cswapGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 
:= binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + t0 := m & (x0 ^ y0) + t1 := m & (x1 ^ y1) + t2 := m & (x2 ^ y2) + t3 := m & (x3 ^ y3) + t4 := m & (x4 ^ y4) + t5 := m & (x5 ^ y5) + t6 := m & (x6 ^ y6) + x0 ^= t0 + x1 ^= t1 + x2 ^= t2 + x3 ^= t3 + x4 ^= t4 + x5 ^= t5 + x6 ^= t6 + y0 ^= t0 + y1 ^= t1 + y2 ^= t2 + y3 ^= t3 + y4 ^= t4 + y5 ^= t5 + y6 ^= t6 + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) + + binary.LittleEndian.PutUint64(y[0*8:1*8], y0) + binary.LittleEndian.PutUint64(y[1*8:2*8], y1) + binary.LittleEndian.PutUint64(y[2*8:3*8], y2) + binary.LittleEndian.PutUint64(y[3*8:4*8], y3) + binary.LittleEndian.PutUint64(y[4*8:5*8], y4) + binary.LittleEndian.PutUint64(y[5*8:6*8], y5) + binary.LittleEndian.PutUint64(y[6*8:7*8], y6) +} + +func addGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Add64(x0, y0, 0) + z1, c1 := bits.Add64(x1, y1, c0) + z2, c2 := bits.Add64(x2, y2, c1) + z3, c3 := bits.Add64(x3, y3, c2) + z4, c4 := bits.Add64(x4, y4, c3) + z5, c5 := bits.Add64(x5, y5, c4) + z6, z7 := bits.Add64(x6, y6, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, z7 = bits.Add64(z6, 0, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, _ = bits.Add64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func subGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := 
binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Sub64(x0, y0, 0) + z1, c1 := bits.Sub64(x1, y1, c0) + z2, c2 := bits.Sub64(x2, y2, c1) + z3, c3 := bits.Sub64(x3, y3, c2) + z4, c4 := bits.Sub64(x4, y4, c3) + z5, c5 := bits.Sub64(x5, y5, c4) + z6, z7 := bits.Sub64(x6, y6, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, z7 = bits.Sub64(z6, 0, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, _ = bits.Sub64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func addsubGeneric(x, y *Elt) { + z := &Elt{} + addGeneric(z, x, y) + subGeneric(y, x, y) + *x = *z +} + +func mulGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6} + zz := [7]uint64{} + + yi := yy[0] + h0, l0 := bits.Mul64(x0, yi) + h1, l1 := bits.Mul64(x1, yi) + h2, l2 := bits.Mul64(x2, yi) + h3, l3 := bits.Mul64(x3, yi) + h4, l4 := bits.Mul64(x4, yi) + h5, l5 := bits.Mul64(x5, yi) + h6, l6 := bits.Mul64(x6, yi) + + zz[0] = l0 + a0, c0 := bits.Add64(h0, l1, 0) + a1, c1 := bits.Add64(h1, l2, c0) + a2, c2 := bits.Add64(h2, l3, c1) + a3, c3 := bits.Add64(h3, l4, c2) + a4, c4 := bits.Add64(h4, l5, c3) + a5, c5 := bits.Add64(h5, l6, c4) + a6, _ := bits.Add64(h6, 0, c5) + + for i := 1; i < 7; i++ { + yi = yy[i] + h0, l0 = bits.Mul64(x0, yi) + h1, l1 = bits.Mul64(x1, yi) + h2, l2 = bits.Mul64(x2, yi) + h3, l3 = bits.Mul64(x3, yi) + h4, l4 = bits.Mul64(x4, yi) + h5, l5 = bits.Mul64(x5, yi) + h6, l6 = bits.Mul64(x6, yi) + + zz[i], c0 = bits.Add64(a0, l0, 0) + a0, c1 = bits.Add64(a1, l1, c0) + a1, c2 = bits.Add64(a2, l2, c1) + a2, c3 = bits.Add64(a3, l3, c2) + a3, c4 = bits.Add64(a4, l4, c3) + a4, c5 = bits.Add64(a5, l5, c4) + a5, a6 = bits.Add64(a6, l6, c5) + + a0, c0 = bits.Add64(a0, h0, 0) + a1, c1 = bits.Add64(a1, h1, c0) + a2, c2 = bits.Add64(a2, h2, c1) + a3, c3 = bits.Add64(a3, h3, c2) + a4, c4 = bits.Add64(a4, h4, c3) + a5, c5 = bits.Add64(a5, h5, c4) + a6, _ = bits.Add64(a6, h6, c5) + } + red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6}) +} + +func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) } + +func red64(z *Elt, l, h *[7]uint64) { + /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */ + h0 := h[0] 
+ h1 := h[1] + h2 := h[2] + h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF) + h4 := (h[3] >> 63) | (h[4] << 1) + h5 := (h[4] >> 63) | (h[5] << 1) + h6 := (h[5] >> 63) | (h[6] << 1) + h7 := (h[6] >> 63) + + l0, c0 := bits.Add64(h0, l[0], 0) + l1, c1 := bits.Add64(h1, l[1], c0) + l2, c2 := bits.Add64(h2, l[2], c1) + l3, c3 := bits.Add64(h3, l[3], c2) + l4, c4 := bits.Add64(h4, l[4], c3) + l5, c5 := bits.Add64(h5, l[5], c4) + l6, c6 := bits.Add64(h6, l[6], c5) + l7, _ := bits.Add64(h7, 0, c6) + + /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */ + h0 = (h[3] >> 32) | (h[4] << 32) + h1 = (h[4] >> 32) | (h[5] << 32) + h2 = (h[5] >> 32) | (h[6] << 32) + h3 = (h[6] >> 32) | (h[0] << 32) + h4 = (h[0] >> 32) | (h[1] << 32) + h5 = (h[1] >> 32) | (h[2] << 32) + h6 = (h[2] >> 32) | (h[3] << 32) + + l0, c0 = bits.Add64(l0, h0, 0) + l1, c1 = bits.Add64(l1, h1, c0) + l2, c2 = bits.Add64(l2, h2, c1) + l3, c3 = bits.Add64(l3, h3, c2) + l4, c4 = bits.Add64(l4, h4, c3) + l5, c5 = bits.Add64(l5, h5, c4) + l6, c6 = bits.Add64(l6, h6, c5) + l7, _ = bits.Add64(l7, 0, c6) + + /* (C7) + (C6,...,C0) */ + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 = bits.Add64(l6, 0, c5) + + /* (C7) + (C6,...,C0) */ + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, _ = bits.Add64(l6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], l0) + binary.LittleEndian.PutUint64(z[1*8:2*8], l1) + binary.LittleEndian.PutUint64(z[2*8:3*8], l2) + binary.LittleEndian.PutUint64(z[3*8:4*8], l3) + binary.LittleEndian.PutUint64(z[4*8:5*8], l4) + binary.LittleEndian.PutUint64(z[5*8:6*8], l5) + binary.LittleEndian.PutUint64(z[6*8:7*8], l6) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go new file mode 100644 index 000000000..a62225d29 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go @@ -0,0 +1,12 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp448 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go new file mode 100644 index 000000000..2d7afc805 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go @@ -0,0 +1,75 @@ +//go:build gofuzz +// +build gofuzz + +// How to run the fuzzer: +// +// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz +// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build +// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a +// $ clang -fsanitize=fuzzer lib.a -o fu.exe +// $ ./fu.exe +package fp448 + +import ( + "encoding/binary" + "fmt" + "math/big" + + "github.com/cloudflare/circl/internal/conv" +) + +// FuzzReduction is a fuzzer target for red64 function, which reduces t +// (112 bits) to a number t' (56 bits) congruent modulo p448. 
+func FuzzReduction(data []byte) int { + if len(data) != 2*Size { + return -1 + } + var got, want Elt + var lo, hi [7]uint64 + a := data[:Size] + b := data[Size:] + lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8]) + lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8]) + lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8]) + lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8]) + lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8]) + lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8]) + lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8]) + + hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8]) + hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8]) + hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8]) + hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8]) + hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8]) + hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8]) + hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8]) + + red64(&got, &lo, &hi) + + t := conv.BytesLe2BigInt(data[:2*Size]) + + two448 := big.NewInt(1) + two448.Lsh(two448, 448) // 2^448 + mask448 := big.NewInt(1) + mask448.Sub(two448, mask448) // 2^448-1 + two224plus1 := big.NewInt(1) + two224plus1.Lsh(two224plus1, 224) + two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1 + + var loBig, hiBig big.Int + for t.Cmp(two448) >= 0 { + loBig.And(t, mask448) + hiBig.Rsh(t, 448) + t.Mul(&hiBig, two224plus1) + t.Add(t, &loBig) + } + conv.BigInt2BytesLe(want[:], t) + + if got != want { + fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size])) + fmt.Printf("got: %v\n", got) + fmt.Printf("want: %v\n", want) + panic("error found") + } + return 1 +} diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go new file mode 100644 index 000000000..a43851b8b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go @@ -0,0 +1,122 @@ +// Package mlsbset provides a constant-time exponentiation method with precomputation. +// +// References: "Efficient and secure algorithms for GLV-based scalar +// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// - https://doi.org/10.1007/s13389-014-0085-7 +// - https://eprint.iacr.org/2013/158 +package mlsbset + +import ( + "errors" + "fmt" + "math/big" + + "github.com/cloudflare/circl/internal/conv" +) + +// EltG is a group element. +type EltG interface{} + +// EltP is a precomputed group element. +type EltP interface{} + +// Group defines the operations required by MLSBSet exponentiation method. +type Group interface { + Identity() EltG // Returns the identity of the group. + Sqr(x EltG) // Calculates x = x^2. + Mul(x EltG, y EltP) // Calculates x = x*y. + NewEltP() EltP // Returns an arbitrary precomputed element. + ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)). + Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u]. +} + +// Params contains the parameters of the encoding. +type Params struct { + T uint // T is the maximum size (in bits) of exponents. + V uint // V is the number of tables. + W uint // W is the window size. + E uint // E is the number of digits per table. + D uint // D is the number of digits in total. + L uint // L is the length of the code. +} + +// Encoder allows to convert integers into valid powers. +type Encoder struct{ p Params } + +// New produces an encoder of the MLSBSet algorithm. 
+func New(t, v, w uint) (Encoder, error) { + if !(t > 1 && v >= 1 && w >= 2) { + return Encoder{}, errors.New("t>1, v>=1, w>=2") + } + e := (t + w*v - 1) / (w * v) + d := e * v + l := d * w + return Encoder{Params{t, v, w, e, d, l}}, nil +} + +// Encode converts an odd integer k into a valid power for exponentiation. +func (m Encoder) Encode(k []byte) (*Power, error) { + if len(k) == 0 { + return nil, errors.New("empty slice") + } + if !(len(k) <= int(m.p.L+7)>>3) { + return nil, errors.New("k too big") + } + if k[0]%2 == 0 { + return nil, errors.New("k must be odd") + } + ap := int((m.p.L+7)/8) - len(k) + k = append(k, make([]byte, ap)...) + s := m.signs(k) + b := make([]int32, m.p.L-m.p.D) + c := conv.BytesLe2BigInt(k) + c.Rsh(c, m.p.D) + var bi big.Int + for i := m.p.D; i < m.p.L; i++ { + c0 := int32(c.Bit(0)) + b[i-m.p.D] = s[i%m.p.D] * c0 + bi.SetInt64(int64(b[i-m.p.D] >> 1)) + c.Rsh(c, 1) + c.Sub(c, &bi) + } + carry := int(c.Int64()) + return &Power{m, s, b, carry}, nil +} + +// signs calculates the set of signs. +func (m Encoder) signs(k []byte) []int32 { + s := make([]int32, m.p.D) + s[m.p.D-1] = 1 + for i := uint(1); i < m.p.D; i++ { + ki := int32((k[i>>3] >> (i & 0x7)) & 0x1) + s[i-1] = 2*ki - 1 + } + return s +} + +// GetParams returns the complementary parameters of the encoding. +func (m Encoder) GetParams() Params { return m.p } + +// tableSize returns the size of each table. +func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) } + +// Elts returns the total number of elements that must be precomputed. +func (m Encoder) Elts() uint { return m.p.V * m.tableSize() } + +// IsExtended returns true if the element x^(2^(wd)) must be calculated. +func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W } + +// Ops returns the number of squares and multiplications executed during an exponentiation. +func (m Encoder) Ops() (S uint, M uint) { + S = m.p.E + M = m.p.E * m.p.V + if m.IsExtended() { + M++ + } + return +} + +func (m Encoder) String() string { + return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v", + m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended()) +} diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go new file mode 100644 index 000000000..3f214c304 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go @@ -0,0 +1,64 @@ +package mlsbset + +import "fmt" + +// Power is a valid exponent produced by the MLSBSet encoding algorithm. +type Power struct { + set Encoder // parameters of code. + s []int32 // set of signs. + b []int32 // set of digits. + c int // carry is {0,1}. +} + +// Exp is calculates x^k, where x is a predetermined element of a group G. +func (p *Power) Exp(G Group) EltG { + a, b := G.Identity(), G.NewEltP() + for e := int(p.set.p.E - 1); e >= 0; e-- { + G.Sqr(a) + for v := uint(0); v < p.set.p.V; v++ { + sgnElt, idElt := p.Digit(v, uint(e)) + G.Lookup(b, v, sgnElt, idElt) + G.Mul(a, b) + } + } + if p.set.IsExtended() && p.c == 1 { + G.Mul(a, G.ExtendedEltP()) + } + return a +} + +// Digit returns the (v,e)-th digit and its sign. +func (p *Power) Digit(v, e uint) (sgn, dig int32) { + sgn = p.bit(0, v, e) + dig = 0 + for i := p.set.p.W - 1; i > 0; i-- { + dig = 2*dig + p.bit(i, v, e) + } + mask := dig >> 31 + dig = (dig + mask) ^ mask + return sgn, dig +} + +// bit returns the (w,v,e)-th bit of the code. 
+func (p *Power) bit(w, v, e uint) int32 { + if !(w < p.set.p.W && + v < p.set.p.V && + e < p.set.p.E) { + panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e)) + } + if w == 0 { + return p.s[p.set.p.E*v+e] + } + return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e] +} + +func (p *Power) String() string { + dig := "" + for j := uint(0); j < p.set.p.V; j++ { + for i := uint(0); i < p.set.p.E; i++ { + s, d := p.Digit(j, i) + dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d) + } + } + return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig) +} diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go new file mode 100644 index 000000000..158fd83a7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/primes.go @@ -0,0 +1,34 @@ +package math + +import ( + "crypto/rand" + "io" + "math/big" +) + +// IsSafePrime reports whether p is (probably) a safe prime. +// The prime p=2*q+1 is safe prime if both p and q are primes. +// Note that ProbablyPrime is not suitable for judging primes +// that an adversary may have crafted to fool the test. +func IsSafePrime(p *big.Int) bool { + pdiv2 := new(big.Int).Rsh(p, 1) + return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20) +} + +// SafePrime returns a number of the given bit length that is a safe prime with high probability. +// The number returned p=2*q+1 is a safe prime if both p and q are primes. +// SafePrime will return error for any error returned by rand.Read or if bits < 2. +func SafePrime(random io.Reader, bits int) (*big.Int, error) { + one := big.NewInt(1) + p := new(big.Int) + for { + q, err := rand.Prime(random, bits-1) + if err != nil { + return nil, err + } + p.Lsh(q, 1).Add(p, one) + if p.ProbablyPrime(20) { + return p, nil + } + } +} diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go new file mode 100644 index 000000000..94a1ec504 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/wnaf.go @@ -0,0 +1,84 @@ +// Package math provides some utility functions for big integers. +package math + +import "math/big" + +// SignedDigit obtains the signed-digit recoding of n and returns a list L of +// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number +// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the +// output has ceil(l/(w-1)) digits. +// +// Restrictions: +// - n is odd and n > 0. +// - 1 < w < 32. +// - l >= bit length of n. +// +// References: +// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms" +// by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21 +// - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and +// Security Analysis" by Bos et al. 
http://doi.org/10.1007/s13389-015-0097-y +func SignedDigit(n *big.Int, w, l uint) []int32 { + if n.Sign() <= 0 || n.Bit(0) == 0 { + panic("n must be non-zero, odd, and positive") + } + if w <= 1 || w >= 32 { + panic("Verify that 1 < w < 32") + } + if uint(n.BitLen()) > l { + panic("n is too big to fit in l digits") + } + lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1)) + L := make([]int32, lenN+1) + var k, v big.Int + k.Set(n) + + var i uint + for i = 0; i < lenN; i++ { + words := k.Bits() + value := int32(words[0] & ((1 << w) - 1)) + value -= int32(1) << (w - 1) + L[i] = value + v.SetInt64(int64(value)) + k.Sub(&k, &v) + k.Rsh(&k, w-1) + } + L[i] = int32(k.Int64()) + return L +} + +// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and +// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ). +// +// Reference: +// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas. +// http://doi.org/10.1023/A:1008306223194 +func OmegaNAF(n *big.Int, w uint) (L []int32) { + if n.Sign() < 0 { + panic("n must be positive") + } + if w <= 1 || w >= 32 { + panic("Verify that 1 < w < 32") + } + + L = make([]int32, n.BitLen()+1) + var k, v big.Int + k.Set(n) + + i := 0 + for ; k.Sign() > 0; i++ { + value := int32(0) + if k.Bit(0) == 1 { + words := k.Bits() + value = int32(words[0] & ((1 << w) - 1)) + if value >= (int32(1) << (w - 1)) { + value -= int32(1) << w + } + v.SetInt64(int64(value)) + k.Sub(&k, &v) + } + L[i] = value + k.Rsh(&k, 1) + } + return L[:i] +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go new file mode 100644 index 000000000..2c73c26fb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go @@ -0,0 +1,453 @@ +// Package ed25519 implements Ed25519 signature scheme as described in RFC-8032. +// +// This package provides optimized implementations of the three signature +// variants and maintaining closer compatibility with crypto/ed25519. +// +// | Scheme Name | Sign Function | Verification | Context | +// |-------------|-------------------|---------------|-------------------| +// | Ed25519 | Sign | Verify | None | +// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty | +// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty | +// | All above | (PrivateKey).Sign | VerifyAny | As above | +// +// Specific functions for sign and verify are defined. A generic signing +// function for all schemes is available through the crypto.Signer interface, +// which is implemented by the PrivateKey type. A correspond all-in-one +// verification method is provided by the VerifyAny function. +// +// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain +// separation. This parameter is passed using a SignerOptions struct defined +// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx +// enforces non-empty context strings. +// +// # Compatibility with crypto.ed25519 +// +// These functions are compatible with the “Ed25519” function defined in +// RFC-8032. However, unlike RFC 8032's formulation, this package's private +// key representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the +// RFC-8032 private key as the “seed”. +// +// References +// +// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt +// - Ed25519: https://ed25519.cr.yp.to/ +// - EdDSA: High-speed high-security signatures. 
https://doi.org/10.1007/s13389-012-0027-1 +package ed25519 + +import ( + "bytes" + "crypto" + cryptoRand "crypto/rand" + "crypto/sha512" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "github.com/cloudflare/circl/sign" +) + +const ( + // ContextMaxSize is the maximum length (in bytes) allowed for context. + ContextMaxSize = 255 + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +const ( + paramB = 256 / 8 // Size of keys in bytes. +) + +// SignerOptions implements crypto.SignerOpts and augments with parameters +// that are specific to the Ed25519 signature schemes. +type SignerOptions struct { + // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512 + // for Ed25519ph. + crypto.Hash + + // Context is an optional domain separation string for Ed25519ph and a + // must for Ed25519ctx. Its length must be less or equal than 255 bytes. + Context string + + // Scheme is an identifier for choosing a signature scheme. The zero value + // is ED25519. + Scheme SchemeID +} + +// SchemeID is an identifier for each signature scheme. +type SchemeID uint + +const ( + ED25519 SchemeID = iota + ED25519Ph + ED25519Ctx +) + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Equal reports whether priv and x have the same value. +func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { + xx, ok := x.(PrivateKey) + return ok && subtle.ConstantTimeCompare(priv, xx) == 1 +} + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return publicKey +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +func (priv PrivateKey) Scheme() sign.Scheme { return sch } + +func (pub PublicKey) Scheme() sign.Scheme { return sch } + +func (priv PrivateKey) MarshalBinary() (data []byte, err error) { + privateKey := make(PrivateKey, PrivateKeySize) + copy(privateKey, priv) + return privateKey, nil +} + +func (pub PublicKey) MarshalBinary() (data []byte, err error) { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, pub) + return publicKey, nil +} + +// Equal reports whether pub and x have the same value. +func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + return ok && bytes.Equal(pub, xx) +} + +// Sign creates a signature of a message with priv key. +// This function is compatible with crypto.ed25519 and also supports the +// three signature variants defined in RFC-8032, namely Ed25519 (or pure +// EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for +// opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. 
+// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct (defined in this package) to pass a context +// string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message), nil + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return SignPh(priv, message, ctx), nil + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return SignWithCtx(priv, message, ctx), nil + default: + return nil, errors.New("ed25519: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + privateKey := make(PrivateKey, PrivateKeySize) + newKeyFromSeed(privateKey, seed) + return privateKey +} + +func newKeyFromSeed(privateKey, seed []byte) { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + var P pointR1 + k := sha512.Sum512(seed) + clamp(k[:]) + reduceModOrder(k[:paramB], false) + P.fixedMult(k[:paramB]) + copy(privateKey[:SeedSize], seed) + _ = P.ToBytes(privateKey[SeedSize:]) +} + +func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + // 1. Hash the 32-byte private key using SHA-512. + _, _ = H.Write(privateKey[:SeedSize]) + h := H.Sum(nil) + clamp(h[:]) + prefix, s := h[paramB:], h[:paramB] + + // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M)) + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(prefix) + _, _ = H.Write(PHM) + r := H.Sum(nil) + reduceModOrder(r[:], true) + + // 3. Compute the point [r]B. + var P pointR1 + P.fixedMult(r[:paramB]) + R := (&[paramB]byte{})[:] + if err := P.ToBytes(R); err != nil { + panic(err) + } + + // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)). + H.Reset() + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(privateKey[SeedSize:]) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + + reduceModOrder(hRAM[:], true) + + // 5. Compute S = (r + k * s) mod order. + S := (&[paramB]byte{})[:] + calculateS(S, r[:paramB], hRAM[:paramB], s) + + // 6. The signature is the concatenation of R and S. + copy(signature[:paramB], R[:]) + copy(signature[paramB:], S[:]) +} + +// Sign signs the message with privateKey and returns a signature. 
+// This function supports the signature variant defined in RFC-8032: Ed25519, +// also known as the pure version of EdDSA. +// It will panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(""), false) + return signature +} + +// SignPh creates a signature of a message with private key and context. +// This function supports the signature variant defined in RFC-8032: Ed25519ph, +// meaning it internally hashes the message using SHA-512, and optionally +// accepts a context string. +// It will panic if len(privateKey) is not PrivateKeySize. +// Context could be passed to this function, which length should be no more than +// ContextMaxSize=255. It can be empty. +func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte { + if len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx))) + } + + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(ctx), true) + return signature +} + +// SignWithCtx creates a signature of a message with private key and context. +// This function supports the signature variant defined in RFC-8032: Ed25519ctx, +// meaning it accepts a non-empty context string. +// It will panic if len(privateKey) is not PrivateKeySize. +// Context must be passed to this function, which length should be no more than +// ContextMaxSize=255 and cannot be empty. +func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize)) + } + + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(ctx), false) + return signature +} + +func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { + if len(public) != PublicKeySize || + len(signature) != SignatureSize || + !isLessThanOrder(signature[paramB:]) { + return false + } + + var P pointR1 + if ok := P.FromBytes(public); !ok { + return false + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + R := signature[:paramB] + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(public) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + reduceModOrder(hRAM[:], true) + + var Q pointR1 + encR := (&[paramB]byte{})[:] + P.neg() + Q.doubleMult(&P, signature[paramB:], hRAM[:paramB]) + _ = Q.ToBytes(encR) + return bytes.Equal(R, encR) +} + +// VerifyAny returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports all the three signature variants defined in RFC-8032, +// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. +// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct to pass a context string for signing. 
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Verify(public, message, signature) + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return VerifyPh(public, message, signature, ctx) + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return VerifyWithCtx(public, message, signature, ctx) + default: + return false + } +} + +// Verify returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519, +// also known as the pure version of EdDSA. +func Verify(public PublicKey, message, signature []byte) bool { + return verify(public, message, signature, []byte(""), false) +} + +// VerifyPh returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519ph, +// meaning it internally hashes the message using SHA-512. +// Context could be passed to this function, which length should be no more than +// 255. It can be empty. +func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), true) +} + +// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded, or when context is +// not provided. +// This function supports the signature variant defined in RFC-8032: Ed25519ctx, +// meaning it does not handle prehashed messages. Non-empty context string must be +// provided, and must not be more than 255 of length. +func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + return false + } + + return verify(public, message, signature, []byte(ctx), false) +} + +func clamp(k []byte) { + k[0] &= 248 + k[paramB-1] = (k[paramB-1] & 127) | 64 +} + +// isLessThanOrder returns true if 0 <= x < order. +func isLessThanOrder(x []byte) bool { + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom2 := "SigEd25519 no Ed25519 collisions" + + if len(ctx) > 0 { + _, _ = h.Write([]byte(dom2)) + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) + } else if preHash { + _, _ = h.Write([]byte(dom2)) + _, _ = h.Write([]byte{0x01, 0x00}) + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go new file mode 100644 index 000000000..10efafdca --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go @@ -0,0 +1,175 @@ +package ed25519 + +import ( + "encoding/binary" + "math/bits" +) + +var order = [paramB]byte{ + 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, + 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length. 
+func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// reduceModOrder calculates k = k mod order of the curve. +func reduceModOrder(k []byte, is512Bit bool) { + var X [((2 * paramB) * 8) / 64]uint64 + numWords := len(k) >> 3 + for i := 0; i < numWords; i++ { + X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8]) + } + red512(&X, is512Bit) + for i := 0; i < numWords; i++ { + binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i]) + } +} + +// red512 calculates x = x mod Order of the curve. +func red512(x *[8]uint64, full bool) { + // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied + // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone. + const ( + ell0 = uint64(0x5812631a5cf5d3ed) + ell1 = uint64(0x14def9dea2f79cd6) + ell160 = uint64(0x812631a5cf5d3ed0) + ell161 = uint64(0x4def9dea2f79cd65) + ell162 = uint64(0x0000000000000001) + ) + + var c0, c1, c2, c3 uint64 + r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0) + + if full { + q0, q1, q2, q3 := x[4], x[5], x[6], x[7] + + for i := 0; i < 3; i++ { + h0, s0 := bits.Mul64(q0, ell160) + h1, s1 := bits.Mul64(q1, ell160) + h2, s2 := bits.Mul64(q2, ell160) + h3, s3 := bits.Mul64(q3, ell160) + + s1, c0 = bits.Add64(h0, s1, 0) + s2, c1 = bits.Add64(h1, s2, c0) + s3, c2 = bits.Add64(h2, s3, c1) + s4, _ := bits.Add64(h3, 0, c2) + + h0, l0 := bits.Mul64(q0, ell161) + h1, l1 := bits.Mul64(q1, ell161) + h2, l2 := bits.Mul64(q2, ell161) + h3, l3 := bits.Mul64(q3, ell161) + + l1, c0 = bits.Add64(h0, l1, 0) + l2, c1 = bits.Add64(h1, l2, c0) + l3, c2 = bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + s1, c0 = bits.Add64(s1, l0, 0) + s2, c1 = bits.Add64(s2, l1, c0) + s3, c2 = bits.Add64(s3, l2, c1) + s4, c3 = bits.Add64(s4, l3, c2) + s5, s6 := bits.Add64(l4, 0, c3) + + s2, c0 = bits.Add64(s2, q0, 0) + s3, c1 = bits.Add64(s3, q1, c0) + s4, c2 = bits.Add64(s4, q2, c1) + s5, c3 = bits.Add64(s5, q3, c2) + s6, s7 := bits.Add64(s6, 0, c3) + + q := q0 | q1 | q2 | q3 + m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1 + s0 &= m + s1 &= m + s2 &= m + s3 &= m + q0, q1, q2, q3 = s4, s5, s6, s7 + + if (i+1)%2 == 0 { + r0, c0 = bits.Add64(r0, s0, 0) + r1, c1 = bits.Add64(r1, s1, c0) + r2, c2 = bits.Add64(r2, s2, c1) + r3, c3 = bits.Add64(r3, s3, c2) + r4, _ = bits.Add64(r4, 0, c3) + } else { + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, c3 = bits.Sub64(r3, s3, c2) + r4, _ = bits.Sub64(r4, 0, c3) + } + } + + m := -(r4 >> 63) + r0, c0 = bits.Add64(r0, m&ell160, 0) + r1, c1 = bits.Add64(r1, m&ell161, c0) + r2, c2 = bits.Add64(r2, m&ell162, c1) + r3, c3 = bits.Add64(r3, 0, c2) + r4, _ = bits.Add64(r4, m&1, c3) + x[4], x[5], x[6], x[7] = 0, 0, 0, 0 + } + + q0 := (r4 << 4) | (r3 >> 60) + r3 &= (uint64(1) << 60) - 1 + + h0, s0 := bits.Mul64(ell0, q0) + h1, s1 := bits.Mul64(ell1, q0) + s1, c0 = bits.Add64(h0, s1, 0) + s2, _ := bits.Add64(h1, 0, c0) + + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, _ = bits.Sub64(r3, 0, c2) + + x[0], x[1], x[2], x[3] = r0, r1, r2, r3 +} + +// calculateS performs s = r+k*a mod Order of the curve. 
+func calculateS(s, r, k, a []byte) { + K := [4]uint64{ + binary.LittleEndian.Uint64(k[0*8 : 1*8]), + binary.LittleEndian.Uint64(k[1*8 : 2*8]), + binary.LittleEndian.Uint64(k[2*8 : 3*8]), + binary.LittleEndian.Uint64(k[3*8 : 4*8]), + } + S := [8]uint64{ + binary.LittleEndian.Uint64(r[0*8 : 1*8]), + binary.LittleEndian.Uint64(r[1*8 : 2*8]), + binary.LittleEndian.Uint64(r[2*8 : 3*8]), + binary.LittleEndian.Uint64(r[3*8 : 4*8]), + } + var c3 uint64 + for i := range K { + ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8]) + + h0, l0 := bits.Mul64(K[0], ai) + h1, l1 := bits.Mul64(K[1], ai) + h2, l2 := bits.Mul64(K[2], ai) + h3, l3 := bits.Mul64(K[3], ai) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + S[i+0], c0 = bits.Add64(S[i+0], l0, 0) + S[i+1], c1 = bits.Add64(S[i+1], l1, c0) + S[i+2], c2 = bits.Add64(S[i+2], l2, c1) + S[i+3], c3 = bits.Add64(S[i+3], l3, c2) + S[i+4], _ = bits.Add64(S[i+4], l4, c3) + } + red512(&S, true) + binary.LittleEndian.PutUint64(s[0*8:1*8], S[0]) + binary.LittleEndian.PutUint64(s[1*8:2*8], S[1]) + binary.LittleEndian.PutUint64(s[2*8:3*8], S[2]) + binary.LittleEndian.PutUint64(s[3*8:4*8], S[3]) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go new file mode 100644 index 000000000..3216aae30 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go @@ -0,0 +1,180 @@ +package ed25519 + +import ( + "crypto/subtle" + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp25519" +) + +var paramD = fp.Elt{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52, +} + +// mLSBRecoding parameters. +const ( + fxT = 257 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) + numWords64 = (paramB * 8 / 64) +) + +// mLSBRecoding is the odd-only modified LSB-set. +// +// Reference: +// +// "Efficient and secure algorithms for GLV-based scalar multiplication and +// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// http://doi.org/10.1007/s13389-014-0085-7. +func mLSBRecoding(L []int8, k []byte) { + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + if len(L) == (ll + 1) { + var m [numWords64 + 1]uint64 + for i := 0; i < numWords64; i++ { + m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8]) + } + condAddOrderN(&m) + L[dd-1] = 1 + for i := 0; i < dd-1; i++ { + kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1 + L[i] = int8(kip1<<1) - 1 + } + { // right-shift by d + right := uint(dd % 64) + left := uint(64) - right + lim := ((numWords64+1)*64 - dd) / 64 + j := dd / 64 + for i := 0; i < lim; i++ { + m[i] = (m[i+j] >> right) | (m[i+j+1] << left) + } + m[lim] = m[lim+j] >> right + } + for i := dd; i < ll; i++ { + L[i] = L[i%dd] * int8(m[0]&0x1) + div2subY(m[:], int64(L[i]>>1), numWords64) + } + L[ll] = int8(m[0]) + } +} + +// absolute returns always a positive value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} + +// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged. 
+func condAddOrderN(x *[numWords64 + 1]uint64) { + isOdd := (x[0] & 0x1) - 1 + c := uint64(0) + for i := 0; i < numWords64; i++ { + orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8]) + o := isOdd & orderWord + x0, c0 := bits.Add64(x[i], o, c) + x[i] = x0 + c = c0 + } + x[numWords64], _ = bits.Add64(x[numWords64], 0, c) +} + +// div2subY update x = (x/2) - y. +func div2subY(x []uint64, y int64, l int) { + s := uint64(y >> 63) + for i := 0; i < l-1; i++ { + x[i] = (x[i] >> 1) | (x[i+1] << 63) + } + x[l-1] = (x[l-1] >> 1) + + b := uint64(0) + x0, b0 := bits.Sub64(x[0], uint64(y), b) + x[0] = x0 + b = b0 + for i := 1; i < l-1; i++ { + x0, b0 := bits.Sub64(x[i], s, b) + x[i] = x0 + b = b0 + } + x[l-1], _ = bits.Sub64(x[l-1], s, b) +} + +func (P *pointR1) fixedMult(scalar []byte) { + if len(scalar) != paramB { + panic("wrong scalar size") + } + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + + L := make([]int8, ll+1) + mLSBRecoding(L[:], scalar) + S := &pointR3{} + P.SetIdentity() + for ii := ee - 1; ii >= 0; ii-- { + P.double() + for j := 0; j < fxV; j++ { + dig := L[fxW*dd-j*ee+ii-ee] + for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd { + dig = 2*dig + L[i] + } + idx := absolute(int32(dig)) + sig := L[dd-j*ee+ii-ee] + Tabj := &tabSign[fxV-j-1] + for k := 0; k < fx2w1; k++ { + S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx)) + } + S.cneg(subtle.ConstantTimeEq(int32(sig), -1)) + P.mixAdd(S) + } + } +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// doubleMult returns P=mG+nQ. +func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) 
+ } + + var TabQ [1 << (omegaVar - 2)]pointR2 + Q.oddMultiples(TabQ[:]) + P.SetIdentity() + for i := len(nafFix) - 1; i >= 0; i-- { + P.double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + P.mixAdd(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + P.add(&S) + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go new file mode 100644 index 000000000..374a69503 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go @@ -0,0 +1,195 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +type ( + pointR1 struct{ x, y, z, ta, tb fp.Elt } + pointR2 struct { + pointR3 + z2 fp.Elt + } +) +type pointR3 struct{ addYX, subYX, dt2 fp.Elt } + +func (P *pointR1) neg() { + fp.Neg(&P.x, &P.x) + fp.Neg(&P.ta, &P.ta) +} + +func (P *pointR1) SetIdentity() { + P.x = fp.Elt{} + fp.SetOne(&P.y) + fp.SetOne(&P.z) + P.ta = fp.Elt{} + P.tb = fp.Elt{} +} + +func (P *pointR1) toAffine() { + fp.Inv(&P.z, &P.z) + fp.Mul(&P.x, &P.x, &P.z) + fp.Mul(&P.y, &P.y, &P.z) + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y +} + +func (P *pointR1) ToBytes(k []byte) error { + P.toAffine() + var x [fp.Size]byte + err := fp.ToBytes(k[:fp.Size], &P.y) + if err != nil { + return err + } + err = fp.ToBytes(x[:], &P.x) + if err != nil { + return err + } + b := x[0] & 1 + k[paramB-1] = k[paramB-1] | (b << 7) + return nil +} + +func (P *pointR1) FromBytes(k []byte) bool { + if len(k) != paramB { + panic("wrong size") + } + signX := k[paramB-1] >> 7 + copy(P.y[:], k[:fp.Size]) + P.y[fp.Size-1] &= 0x7F + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return false + } + + one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + fp.SetOne(one) + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, ¶mD) // v = dy^2 + fp.Sub(u, u, one) // u = y^2-1 + fp.Add(v, v, one) // v = dy^2+1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return false + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return false + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + fp.SetOne(&P.z) + return true +} + +// double calculates 2P for curves with A=-1. +func (P *pointR1) double() { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) mixAdd(Q *pointR3) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 + P.coreAddition(Q) +} + +func (P *pointR1) add(Q *pointR2) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.pointR3) +} + +// coreAddition calculates P=P+Q for curves with A=-1. 
+func (P *pointR1) coreAddition(Q *pointR3) { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) oddMultiples(T []pointR2) { + var R pointR2 + n := len(T) + T[0].fromR1(P) + _2P := *P + _2P.double() + R.fromR1(&_2P) + for i := 1; i < n; i++ { + P.add(&R) + T[i].fromR1(P) + } +} + +func (P *pointR1) isEqual(Q *pointR1) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +func (P *pointR3) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *pointR2) fromR1(Q *pointR1) { + fp.Add(&P.addYX, &Q.y, &Q.x) + fp.Sub(&P.subYX, &Q.y, &Q.x) + fp.Mul(&P.dt2, &Q.ta, &Q.tb) + fp.Mul(&P.dt2, &P.dt2, ¶mD) + fp.Add(&P.dt2, &P.dt2, &P.dt2) + fp.Add(&P.z2, &Q.z, &Q.z) +} + +func (P *pointR3) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *pointR3) cmov(Q *pointR3, b int) { + fp.Cmov(&P.addYX, &Q.addYX, uint(b)) + fp.Cmov(&P.subYX, &Q.subYX, uint(b)) + fp.Cmov(&P.dt2, &Q.dt2, uint(b)) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go new file mode 100644 index 000000000..c3505b67a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package ed25519 + +import cryptoEd25519 "crypto/ed25519" + +// PublicKey is the type of Ed25519 public keys. +type PublicKey cryptoEd25519.PublicKey diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go new file mode 100644 index 000000000..d57d86eff --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go @@ -0,0 +1,7 @@ +//go:build !go1.13 +// +build !go1.13 + +package ed25519 + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go new file mode 100644 index 000000000..e4520f520 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go @@ -0,0 +1,87 @@ +package ed25519 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. 
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed25519" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0807 } +func (*scheme) SupportsContext() bool { return false } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 112} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil && opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + return Sign(priv, message) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil { + if opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + } + return Verify(pub, message, signature) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go new file mode 100644 index 000000000..8763b426f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go @@ -0,0 +1,213 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +var tabSign = [fxV][fx2w1]pointR3{ + { + pointR3{ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { + addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72}, + subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35}, + dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 
0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44}, + }, + { + addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e}, + subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12}, + dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35}, + }, + { + addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d}, + subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a}, + dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45}, + }, + }, + { + { + addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, + subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d}, + dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, + }, + { + addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b}, + subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53}, + dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31}, + }, + { + addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33}, + subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c}, + dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e}, + }, + { + addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23}, + subYX: fp.Elt{0xdd, 0xef, 
0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76}, + dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19}, + }, + }, +} + +var tabVerif = [1 << (omegaFix - 2)]pointR3{ + { /* 1P */ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { /* 3P */ + addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, + subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, + dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, + }, + { /* 5P */ + addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, + subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, + dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, + }, + { /* 7P */ + addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, + subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, + dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, + }, + { /* 9P */ + addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34}, + subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49}, + dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 
0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73}, + }, + { /* 11P */ + addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42}, + subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18}, + dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d}, + }, + { /* 13P */ + addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23}, + subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02}, + dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49}, + }, + { /* 15P */ + addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61}, + subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43}, + dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51}, + }, + { /* 17P */ + addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d}, + subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71}, + dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47}, + }, + { /* 19P */ + addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c}, + subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21}, + dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48}, + }, + { /* 21P */ + addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 
0x55}, + subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77}, + dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a}, + }, + { /* 23P */ + addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44}, + subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01}, + dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15}, + }, + { /* 25P */ + addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b}, + subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78}, + dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72}, + }, + { /* 27P */ + addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79}, + subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c}, + dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70}, + }, + { /* 29P */ + addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06}, + subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13}, + dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c}, + }, + { /* 31P */ + addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a}, + subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a}, + dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 
0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28}, + }, + { /* 33P */ + addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32}, + subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76}, + dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56}, + }, + { /* 35P */ + addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa}, + subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03}, + dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20}, + }, + { /* 37P */ + addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75}, + subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69}, + dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54}, + }, + { /* 39P */ + addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d}, + subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78}, + dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70}, + }, + { /* 41P */ + addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c}, + subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40}, + dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20}, + }, + { /* 43P */ + addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b}, + subYX: fp.Elt{0x43, 
0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38}, + dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04}, + }, + { /* 45P */ + addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79}, + subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51}, + dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c}, + }, + { /* 47P */ + addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e}, + subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75}, + dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71}, + }, + { /* 49P */ + addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41}, + subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e}, + dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f}, + }, + { /* 51P */ + addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32}, + subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62}, + dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b}, + }, + { /* 53P */ + addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a}, + subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a}, + dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 
0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a}, + }, + { /* 55P */ + addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c}, + subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11}, + dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05}, + }, + { /* 57P */ + addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28}, + subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e}, + dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a}, + }, + { /* 59P */ + addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12}, + subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d}, + dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33}, + }, + { /* 61P */ + addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21}, + subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23}, + dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d}, + }, + { /* 63P */ + addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a}, + subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31}, + dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go new file mode 100644 index 000000000..324bd8f33 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go @@ -0,0 +1,411 @@ 
+// Package ed448 implements Ed448 signature scheme as described in RFC-8032. +// +// This package implements two signature variants. +// +// | Scheme Name | Sign Function | Verification | Context | +// |-------------|-------------------|---------------|-------------------| +// | Ed448 | Sign | Verify | Yes, can be empty | +// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty | +// | All above | (PrivateKey).Sign | VerifyAny | As above | +// +// Specific functions for sign and verify are defined. A generic signing +// function for all schemes is available through the crypto.Signer interface, +// which is implemented by the PrivateKey type. A correspond all-in-one +// verification method is provided by the VerifyAny function. +// +// Both schemes require a context string for domain separation. This parameter +// is passed using a SignerOptions struct defined in this package. +// +// References: +// +// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt +// - EdDSA for more curves: https://eprint.iacr.org/2015/677 +// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1 +package ed448 + +import ( + "bytes" + "crypto" + cryptoRand "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "github.com/cloudflare/circl/ecc/goldilocks" + "github.com/cloudflare/circl/internal/sha3" + "github.com/cloudflare/circl/sign" +) + +const ( + // ContextMaxSize is the maximum length (in bytes) allowed for context. + ContextMaxSize = 255 + // PublicKeySize is the length in bytes of Ed448 public keys. + PublicKeySize = 57 + // PrivateKeySize is the length in bytes of Ed448 private keys. + PrivateKeySize = 114 + // SignatureSize is the length in bytes of signatures. + SignatureSize = 114 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 57 +) + +const ( + paramB = 456 / 8 // Size of keys in bytes. + hashSize = 2 * paramB // Size of the hash function's output. +) + +// SignerOptions implements crypto.SignerOpts and augments with parameters +// that are specific to the Ed448 signature schemes. +type SignerOptions struct { + // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph. + crypto.Hash + + // Context is an optional domain separation string for signing. + // Its length must be less or equal than 255 bytes. + Context string + + // Scheme is an identifier for choosing a signature scheme. + Scheme SchemeID +} + +// SchemeID is an identifier for each signature scheme. +type SchemeID uint + +const ( + ED448 SchemeID = iota + ED448Ph +) + +// PublicKey is the type of Ed448 public keys. +type PublicKey []byte + +// Equal reports whether pub and x have the same value. +func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + return ok && bytes.Equal(pub, xx) +} + +// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Equal reports whether priv and x have the same value. +func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { + xx, ok := x.(PrivateKey) + return ok && subtle.ConstantTimeCompare(priv, xx) == 1 +} + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. 
+func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +func (priv PrivateKey) Scheme() sign.Scheme { return sch } + +func (pub PublicKey) Scheme() sign.Scheme { return sch } + +func (priv PrivateKey) MarshalBinary() (data []byte, err error) { + privateKey := make(PrivateKey, PrivateKeySize) + copy(privateKey, priv) + return privateKey, nil +} + +func (pub PublicKey) MarshalBinary() (data []byte, err error) { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, pub) + return publicKey, nil +} + +// Sign creates a signature of a message given a key pair. +// This function supports all the two signature variants defined in RFC-8032, +// namely Ed448 (or pure EdDSA) and Ed448Ph. +// The opts.HashFunc() must return zero to the specify Ed448 variant. This can +// be achieved by passing crypto.Hash(0) as the value for opts. +// Use an Options struct to pass a bool indicating that the ed448Ph variant +// should be used. +// The struct can also be optionally used to pass a context string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message, ctx), nil + case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): + return SignPh(priv, message, ctx), nil + default: + return nil, errors.New("ed448: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make(PrivateKey, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + privateKey := make([]byte, PrivateKeySize) + newKeyFromSeed(privateKey, seed) + return privateKey +} + +func newKeyFromSeed(privateKey, seed []byte) { + if l := len(seed); l != SeedSize { + panic("ed448: bad seed length: " + strconv.Itoa(l)) + } + + var h [hashSize]byte + H := sha3.NewShake256() + _, _ = H.Write(seed) + _, _ = H.Read(h[:]) + s := &goldilocks.Scalar{} + deriveSecretScalar(s, h[:paramB]) + + copy(privateKey[:SeedSize], seed) + _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:]) +} + +func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) { + if len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx)))) + } + + H := sha3.NewShake256() + var PHM []byte + + if preHash { + var h [64]byte + _, _ = H.Write(message) + _, _ = H.Read(h[:]) + PHM = h[:] + H.Reset() + } else { + PHM = message + } + + // 1. Hash the 57-byte private key using SHAKE256(x, 114). 
+ var h [hashSize]byte + _, _ = H.Write(privateKey[:SeedSize]) + _, _ = H.Read(h[:]) + s := &goldilocks.Scalar{} + deriveSecretScalar(s, h[:paramB]) + prefix := h[paramB:] + + // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114). + var rPM [hashSize]byte + H.Reset() + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(prefix) + _, _ = H.Write(PHM) + _, _ = H.Read(rPM[:]) + + // 3. Compute the point [r]B. + r := &goldilocks.Scalar{} + r.FromBytes(rPM[:]) + R := (&[paramB]byte{})[:] + if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil { + panic(err) + } + // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114) + var hRAM [hashSize]byte + H.Reset() + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(privateKey[SeedSize:]) + _, _ = H.Write(PHM) + _, _ = H.Read(hRAM[:]) + + // 5. Compute S = (r + k * s) mod order. + k := &goldilocks.Scalar{} + k.FromBytes(hRAM[:]) + S := &goldilocks.Scalar{} + S.Mul(k, s) + S.Add(S, r) + + // 6. The signature is the concatenation of R and S. + copy(signature[:paramB], R[:]) + copy(signature[paramB:], S[:]) +} + +// Sign signs the message with privateKey and returns a signature. +// This function supports the signature variant defined in RFC-8032: Ed448, +// also known as the pure version of EdDSA. +// It will panic if len(privateKey) is not PrivateKeySize. +func Sign(priv PrivateKey, message []byte, ctx string) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, priv, message, []byte(ctx), false) + return signature +} + +// SignPh creates a signature of a message given a keypair. +// This function supports the signature variant defined in RFC-8032: Ed448ph, +// meaning it internally hashes the message using SHAKE-256. +// Context could be passed to this function, which length should be no more than +// 255. It can be empty. +func SignPh(priv PrivateKey, message []byte, ctx string) []byte { + signature := make([]byte, SignatureSize) + signAll(signature, priv, message, []byte(ctx), true) + return signature +} + +func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { + if len(public) != PublicKeySize || + len(signature) != SignatureSize || + len(ctx) > ContextMaxSize || + !isLessThanOrder(signature[paramB:]) { + return false + } + + P, err := goldilocks.FromBytes(public) + if err != nil { + return false + } + + H := sha3.NewShake256() + var PHM []byte + + if preHash { + var h [64]byte + _, _ = H.Write(message) + _, _ = H.Read(h[:]) + PHM = h[:] + H.Reset() + } else { + PHM = message + } + + var hRAM [hashSize]byte + R := signature[:paramB] + + writeDom(&H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(public) + _, _ = H.Write(PHM) + _, _ = H.Read(hRAM[:]) + + k := &goldilocks.Scalar{} + k.FromBytes(hRAM[:]) + S := &goldilocks.Scalar{} + S.FromBytes(signature[paramB:]) + + encR := (&[paramB]byte{})[:] + P.Neg() + _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR) + return bytes.Equal(R, encR) +} + +// VerifyAny returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports all the two signature variants defined in RFC-8032, +// namely Ed448 (or pure EdDSA) and Ed448Ph. +// The opts.HashFunc() must return zero, this can be achieved by passing +// crypto.Hash(0) as the value for opts. +// Use a SignerOptions struct to pass a context string for signing. 
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED448 && opts.HashFunc() == crypto.Hash(0): + return Verify(public, message, signature, ctx) + case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0): + return VerifyPh(public, message, signature, ctx) + default: + return false + } +} + +// Verify returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed448, +// also known as the pure version of EdDSA. +func Verify(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), false) +} + +// VerifyPh returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed448ph, +// meaning it internally hashes the message using SHAKE-256. +// Context could be passed to this function, which length should be no more than +// 255. It can be empty. +func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), true) +} + +func deriveSecretScalar(s *goldilocks.Scalar, h []byte) { + h[0] &= 0xFC // The two least significant bits of the first octet are cleared, + h[paramB-1] = 0x00 // all eight bits the last octet are cleared, and + h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set. + s.FromBytes(h[:paramB]) +} + +// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero. +func isLessThanOrder(x []byte) bool { + order := goldilocks.Curve{}.Order() + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[paramB-1] == 0 && x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom4 := "SigEd448" + _, _ = h.Write([]byte(dom4)) + + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go new file mode 100644 index 000000000..22da8bc0a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go @@ -0,0 +1,87 @@ +package ed448 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. 
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed448" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0808 } +func (*scheme) SupportsContext() bool { return true } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 113} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Sign(priv, message, ctx) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Verify(pub, message, signature, ctx) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go new file mode 100644 index 000000000..13b20fa4b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -0,0 +1,110 @@ +// Package sign provides unified interfaces for signature schemes. +// +// A register of schemes is available in the package +// +// github.com/cloudflare/circl/sign/schemes +package sign + +import ( + "crypto" + "encoding" + "errors" +) + +type SignatureOpts struct { + // If non-empty, includes the given context in the signature if supported + // and will cause an error during signing otherwise. + Context string +} + +// A public key is used to verify a signature set by the corresponding private +// key. +type PublicKey interface { + // Returns the signature scheme for this public key. + Scheme() Scheme + Equal(crypto.PublicKey) bool + encoding.BinaryMarshaler + crypto.PublicKey +} + +// A private key allows one to create signatures. +type PrivateKey interface { + // Returns the signature scheme for this private key. + Scheme() Scheme + Equal(crypto.PrivateKey) bool + // For compatibility with Go standard library + crypto.Signer + crypto.PrivateKey + encoding.BinaryMarshaler +} + +// A Scheme represents a specific instance of a signature scheme. +type Scheme interface { + // Name of the scheme. + Name() string + + // GenerateKey creates a new key-pair. + GenerateKey() (PublicKey, PrivateKey, error) + + // Creates a signature using the PrivateKey on the given message and + // returns the signature. 
opts are additional options which can be nil. + // + // Panics if key is nil or wrong type or opts context is not supported. + Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte + + // Checks whether the given signature is a valid signature set by + // the private key corresponding to the given public key on the + // given message. opts are additional options which can be nil. + // + // Panics if key is nil or wrong type or opts context is not supported. + Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool + + // Deterministically derives a keypair from a seed. If you're unsure, + // you're better off using GenerateKey(). + // + // Panics if seed is not of length SeedSize(). + DeriveKey(seed []byte) (PublicKey, PrivateKey) + + // Unmarshals a PublicKey from the provided buffer. + UnmarshalBinaryPublicKey([]byte) (PublicKey, error) + + // Unmarshals a PublicKey from the provided buffer. + UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error) + + // Size of binary marshalled public keys. + PublicKeySize() int + + // Size of binary marshalled public keys. + PrivateKeySize() int + + // Size of signatures. + SignatureSize() int + + // Size of seeds. + SeedSize() int + + // Returns whether contexts are supported. + SupportsContext() bool +} + +var ( + // ErrTypeMismatch is the error used if types of, for instance, private + // and public keys don't match. + ErrTypeMismatch = errors.New("types mismatch") + + // ErrSeedSize is the error used if the provided seed is of the wrong + // size. + ErrSeedSize = errors.New("wrong seed size") + + // ErrPubKeySize is the error used if the provided public key is of + // the wrong size. + ErrPubKeySize = errors.New("wrong size for public key") + + // ErrPrivKeySize is the error used if the provided private key is of + // the wrong size. + ErrPrivKeySize = errors.New("wrong size for private key") + + // ErrContextNotSupported is the error used if a context is not + // supported. + ErrContextNotSupported = errors.New("context not supported") +) diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
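
For context on how these vendored pieces fit together, here is a minimal, hypothetical sketch (not part of the upstream code or of this diff) of driving the generic sign.Scheme interface through the ed448 implementation added above; the message and context values are placeholders:

```
package main

import (
	"fmt"

	"github.com/cloudflare/circl/sign"
	"github.com/cloudflare/circl/sign/ed448"
)

func main() {
	// ed448.Scheme() satisfies the generic sign.Scheme interface defined in
	// vendor/github.com/cloudflare/circl/sign/sign.go.
	var scheme sign.Scheme = ed448.Scheme()

	pub, priv, err := scheme.GenerateKey()
	if err != nil {
		panic(err)
	}

	// Ed448 supports a context string for domain separation; the Ed25519
	// scheme would panic here because its SupportsContext() is false.
	opts := &sign.SignatureOpts{Context: "example-context"}
	msg := []byte("hello")

	sig := scheme.Sign(priv, msg, opts)
	fmt.Println(scheme.Name(), scheme.Verify(pub, msg, sig, opts))
}
```

The ed25519 package vendored earlier exposes the same interface via its own Scheme(), differing only in key and signature sizes, OID, TLS identifier, and its lack of context support.
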
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go new file mode 100644 index 000000000..0ec4b12c7 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go @@ -0,0 +1,62 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +func fmtListFlags(flags blackfriday.ListType) string { + knownFlags := []struct { + name string + flag blackfriday.ListType + }{ + {"ListTypeOrdered", blackfriday.ListTypeOrdered}, + {"ListTypeDefinition", blackfriday.ListTypeDefinition}, + {"ListTypeTerm", blackfriday.ListTypeTerm}, + {"ListItemContainsBlock", blackfriday.ListItemContainsBlock}, + {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList}, + {"ListItemEndOfList", blackfriday.ListItemEndOfList}, + } + + var f []string + for _, kf := range knownFlags { + if flags&kf.flag != 0 { + f = append(f, kf.name) + flags &^= kf.flag + } + } + if flags != 0 { + f = append(f, fmt.Sprintf("Unknown(%#x)", flags)) + } + return strings.Join(f, "|") +} + +type debugDecorator struct { + blackfriday.Renderer +} + +func depth(node *blackfriday.Node) int { + d := 0 + for n := node.Parent; n != nil; n = n.Parent { + d++ + } + return d +} + +func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + fmt.Fprintf(os.Stderr, "%s%s %v %v\n", + strings.Repeat(" ", depth(node)), + map[bool]string{true: "+", false: "-"}[entering], + node, + fmtListFlags(node.ListFlags)) + var b strings.Builder + status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering) + if b.Len() > 0 { + fmt.Fprintf(os.Stderr, ">> %q\n", b.String()) + } + return status +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..62d91b77d --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,23 @@ +package md2man + +import ( + "os" + "strconv" + + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + var r blackfriday.Renderer = renderer + if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v { + r = &debugDecorator{Renderer: r} + } + + return blackfriday.Run(doc, + []blackfriday.Option{ + blackfriday.WithRenderer(r), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..9d6c473fd --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,408 @@ +package md2man + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + listCounters []int + firstHeader bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on). + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = ".RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" + tablePreprocessor = `'\" t` +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + return &roffRenderer{} +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (*roffRenderer) GetExtensions() blackfriday.Extensions { + return blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.SpaceHeadings | + blackfriday.Footnotes | + blackfriday.Titleblock | + blackfriday.DefinitionLists +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. + ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + walkAction := blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + // Special case: format the NAME section as required for proper whatis parsing. + // Refer to the lexgrog(1) and groff_man(7) manual pages for details. 
+ if node.Parent != nil && + node.Parent.Type == blackfriday.Paragraph && + node.Parent.Prev != nil && + node.Parent.Prev.Type == blackfriday.Heading && + node.Parent.Prev.FirstChild != nil && + bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) { + before, after, found := bytes.Cut(node.Literal, []byte(" - ")) + escapeSpecialChars(w, before) + if found { + out(w, ` \- `) + escapeSpecialChars(w, after) + } + } else { + escapeSpecialChars(w, node.Literal) + } + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) + } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. + escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + if entering { + if r.listDepth > 0 { + // roff .PP markers break lists + if node.Prev != nil { // continued paragraph + if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 { + out(w, ".IP\n") + } else { + out(w, crTag) + } + } + } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading { + out(w, crTag) + } else { + out(w, paraTag) + } + } else { + if node.Next == nil || node.Next.Type != blackfriday.List { + out(w, crTag) + } + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte("|" + processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior of various parts of HTML renderer. +type HTMLRendererParameters struct { + // Prepend this text to each relative URL. + AbsolutePrefix string + // Add this text to each footnote anchor, to ensure uniqueness. 
+ FootnoteAnchorPrefix string + // Show this text inside the tag for a footnote return link, if the + // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string + // [return] is used. + FootnoteReturnLinkContents string + // If set, add this text to the front of each Heading ID, to ensure + // uniqueness. + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + // Increase heading levels: if the offset is 1,

<h1> becomes <h2>
      etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. + params.FootnoteReturnLinkContents = "↩\ufe0e" + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory : begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if 
bytes.HasPrefix(link, []byte("../")) { + return true + } + + return false +} + +func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { + for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { + tmp := fmt.Sprintf("%s-%d", id, count+1) + + if _, tmpFound := r.headingIDs[tmp]; !tmpFound { + r.headingIDs[id] = count + 1 + id = tmp + } else { + id = id + "-1" + } + } + + if _, found := r.headingIDs[id]; !found { + r.headingIDs[id] = 0 + } + + return id +} + +func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { + if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { + newDest := r.AbsolutePrefix + if link[0] != '/' { + newDest += "/" + } + newDest += string(link) + return []byte(newDest) + } + return link +} + +func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { + if isRelativeLink(link) { + return attrs + } + val := []string{} + if flags&NofollowLinks != 0 { + val = append(val, "nofollow") + } + if flags&NoreferrerLinks != 0 { + val = append(val, "noreferrer") + } + if flags&NoopenerLinks != 0 { + val = append(val, "noopener") + } + if flags&HrefTargetBlank != 0 { + attrs = append(attrs, "target=\"_blank\"") + } + if len(val) == 0 { + return attrs + } + attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) + return append(attrs, attr) +} + +func isMailto(link []byte) bool { + return bytes.HasPrefix(link, []byte("mailto:")) +} + +func needSkipLink(flags HTMLFlags, dest []byte) bool { + if flags&SkipLinks != 0 { + return true + } + return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) +} + +func isSmartypantable(node *Node) bool { + pt := node.Parent.Type + return pt != Link && pt != CodeBlock && pt != Code +} + +func appendLanguageAttr(attrs []string, info []byte) []string { + if len(info) == 0 { + return attrs + } + endOfLang := bytes.IndexAny(info, "\t ") + if endOfLang < 0 { + endOfLang = len(info) + } + return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) +} + +func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { + w.Write(name) + if len(attrs) > 0 { + w.Write(spaceBytes) + w.Write([]byte(strings.Join(attrs, " "))) + } + w.Write(gtBytes) + r.lastOutputLen = 1 +} + +func footnoteRef(prefix string, node *Node) []byte { + urlFrag := prefix + string(slugify(node.Destination)) + anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) + return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) +} + +func footnoteItem(prefix string, slug []byte) []byte { + return []byte(fmt.Sprintf(`
<li id="%sfn:%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <sup class="footnote-return"><a href="#%sfn:%s">%s</a></sup>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+	if node.Prev == nil {
+		return false
+	}
+	ld := node.Parent.ListData
+	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+	grandparent := node.Parent.Parent
+	if grandparent == nil || grandparent.Type != List {
+		return false
+	}
+	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+	return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+	switch align {
+	case TableAlignmentLeft:
+		return "left"
+	case TableAlignmentRight:
+		return "right"
+	case TableAlignmentCenter:
+		return "center"
+	default:
+		return ""
+	}
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+	if r.disableTags > 0 {
+		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+	} else {
+		w.Write(text)
+	}
+	r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.out(w, nlBytes)
+	}
+}
+
+var (
+	nlBytes    = []byte{'\n'}
+	gtBytes    = []byte{'>'}
+	spaceBytes = []byte{' '}
+)
+
+var (
+	brTag              = []byte("<br>")
+	brXHTMLTag         = []byte("<br />")
+	emTag              = []byte("<em>")
+	emCloseTag         = []byte("</em>")
+	strongTag          = []byte("<strong>")
+	strongCloseTag     = []byte("</strong>")
+	delTag             = []byte("<del>")
+	delCloseTag        = []byte("</del>")
+	ttTag              = []byte("<tt>")
+	ttCloseTag         = []byte("</tt>")
+	aTag               = []byte("<a")
+	aCloseTag          = []byte("</a>")
+	preTag             = []byte("<pre>")
+	preCloseTag        = []byte("</pre>")
+	codeTag            = []byte("<code>")
+	codeCloseTag       = []byte("</code>")
+	pTag               = []byte("<p>")
+	pCloseTag          = []byte("</p>")
+	blockquoteTag      = []byte("<blockquote>")
+	blockquoteCloseTag = []byte("</blockquote>")
+	hrTag              = []byte("<hr>")
+	hrXHTMLTag         = []byte("<hr />")
+	ulTag              = []byte("<ul>")
+	ulCloseTag         = []byte("</ul>")
+	olTag              = []byte("<ol>")
+	olCloseTag         = []byte("</ol>")
+	dlTag              = []byte("<dl>")
+	dlCloseTag         = []byte("</dl>")
+	liTag              = []byte("<li>")
+	liCloseTag         = []byte("</li>")
+	ddTag              = []byte("<dd>")
+	ddCloseTag         = []byte("</dd>")
+	dtTag              = []byte("<dt>")
+	dtCloseTag         = []byte("</dt>")
+	tableTag           = []byte("<table>")
+	tableCloseTag      = []byte("</table>")
+	tdTag              = []byte("<td")
+	tdCloseTag         = []byte("</td>")
+	thTag              = []byte("<th")
+	thCloseTag         = []byte("</th>")
+	theadTag           = []byte("<thead>")
+	theadCloseTag      = []byte("</thead>")
+	tbodyTag           = []byte("<tbody>")
+	tbodyCloseTag      = []byte("</tbody>")
+	trTag              = []byte("<tr>")
+	trCloseTag         = []byte("</tr>")
+	h1Tag              = []byte("<h1")
+	h1CloseTag         = []byte("</h1>")
+	h2Tag              = []byte("<h2")
+	h2CloseTag         = []byte("</h2>")
+	h3Tag              = []byte("<h3")
+	h3CloseTag         = []byte("</h3>")
+	h4Tag              = []byte("<h4")
+	h4CloseTag         = []byte("</h4>")
+	h5Tag              = []byte("<h5")
+	h5CloseTag         = []byte("</h5>")
+	h6Tag              = []byte("<h6")
+	h6CloseTag         = []byte("</h6>")
+
+	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
+	footnotesCloseDivBytes = []byte("\n</div>
      \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("

</li>\n\n<li>")
+				} else if node.Level < tocLevel {
+					for node.Level < tocLevel {
+						tocLevel--
+						buf.WriteString("</li>\n</ul>")
+					}
+					buf.WriteString("</li>\n\n<li>
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 000000000..d45bd9417 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. + // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
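The inline dispatcher above looks up one handler per leading byte ('*', '`', '[', '<', ...) and splices whatever node the handler returns into the current block. A small sketch that makes the resulting nodes visible, assuming only the exported v2 API (`New`, `Parse`, `Walk`); the input string is invented:

```
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Exercises several of the inline handlers: '[' (link), '*' (emphasis)
	// and '`' (code span).
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	doc := md.Parse([]byte("visit [the *site*](https://example.com) and run `go vet`"))

	doc.Walk(func(n *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering {
			fmt.Printf("%s %q\n", n.Type, n.Literal) // node type and literal text, if any
		}
		return blackfriday.GoToNext
	})
}
```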
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
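The '<' handler above (leftAngle) covers both raw HTML spans and angle-bracket autolinks; for the email form it prefixes the destination with "mailto:" while the visible text keeps the address bare. A short sketch of the two autolink forms it recognizes, using only the exported API and invented input:

```
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// One URL autolink and one email autolink, both in angle brackets.
	src := []byte("Docs: <https://example.com/docs> or write <security@example.com>.")
	out := blackfriday.Run(src, blackfriday.WithExtensions(blackfriday.CommonExtensions))
	fmt.Println(string(out))
}
```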
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. 
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//	<p><a href="/url/" title="title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. + if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) 
{ + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == '1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 
1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. +func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. +func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/ryanuber/go-glob/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml new file mode 100644 index 000000000..9d1ca3c37 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - tip +script: + - go test -v ./... 
diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 000000000..bdfbd9514 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 000000000..48f7fcb05 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. + +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 000000000..e67db3be1 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. 
+func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. + return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/github.com/urfave/cli/.flake8 b/vendor/github.com/urfave/cli/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore new file mode 100644 index 000000000..9d1812002 --- /dev/null +++ b/vendor/github.com/urfave/cli/.gitignore @@ -0,0 +1,10 @@ +*.coverprofile +coverage.txt +node_modules/ +vendor +.idea +/.local/ +/internal/ +/site/ +package.json +package-lock.json diff --git a/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..41ba294f6 --- /dev/null +++ b/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting Dan Buch at dan@meatballhat.com. All complaints will be +reviewed and investigated and will result in a response that is deemed necessary +and appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE new file mode 100644 index 000000000..99d8559da --- /dev/null +++ b/vendor/github.com/urfave/cli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md new file mode 100644 index 000000000..c7dd62bc6 --- /dev/null +++ b/vendor/github.com/urfave/cli/README.md @@ -0,0 +1,51 @@ +cli +=== + +[![Run Tests](https://github.com/urfave/cli/actions/workflows/cli.yml/badge.svg?branch=v1-maint)](https://github.com/urfave/cli/actions/workflows/cli.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/urfave/cli/.svg)](https://pkg.go.dev/github.com/urfave/cli/) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![codecov](https://codecov.io/gh/urfave/cli/branch/v1-maint/graph/badge.svg)](https://codecov.io/gh/urfave/cli) + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + +## Usage Documentation + +Usage documentation for `v1` is available [at the docs +site](https://cli.urfave.org/v1/getting-started/) or in-tree at +[./docs/v1/manual.md](./docs/v1/manual.md) + +## Installation + +Make sure you have a working Go environment. Go version 1.18+ is supported. + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest released +version of Go on OS X and Windows. For full details, see +[./.github/workflows/cli.yml](./.github/workflows/cli.yml). + +### Build tags + +You can use the following build tags: + +#### `urfave_cli_no_docs` + +When set, this removes `ToMarkdown` and `ToMan` methods, so your application +won't be able to call those. This reduces the resulting binary size by about +300-400 KB (measured using Go 1.18.1 on Linux/amd64), due to less dependencies. + +### Using `v1` releases + +``` +$ go get github.com/urfave/cli +``` + +```go +... +import ( + "github.com/urfave/cli" +) +... +``` diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go new file mode 100644 index 000000000..df5a9a91b --- /dev/null +++ b/vendor/github.com/urfave/cli/app.go @@ -0,0 +1,531 @@ +package cli + +import ( + "flag" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + // unused variable. commented for now. will remove in future if agreed upon by everyone + //runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format. 
+ ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // List of commands to execute + Commands []Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // Populate on app startup, only gettable through method Categories() + categories CommandCategories + // An action to execute when the bash-completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + + // The action to execute when no subcommands are specified + // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` + // *Note*: support for the deprecated `Action` signature will be removed in a future version + Action interface{} + + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []Author + // Copyright of the binary if any + Copyright string + // Name of Author (Note: Use App.Authors, this is deprecated) + Author string + // Email of Author (Note: Use App.Authors, this is deprecated) + Email string + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to + // function as a default, so this is optional. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // Boolean to enable short-option handling so user can combine several + // single-character bool arguements into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Writer: os.Stdout, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + var newCmds []Command + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + if a.Version == "" { + a.HideVersion = true + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, arguments[1:], shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _ = ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + _ = ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowAppHelp(context) + return cerr + } + + if a.After != nil && !context.shellComplete { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil && !context.shellComplete { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = HandleAction(a.Action, context) + + a.handleExitCoder(context, err) + return err +} + +// RunAndExitOnError 
calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. This will cause the application to exit with the given eror +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(a.errWriter(), err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, ctx.Args().Tail(), ctx.shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _, _ = fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + _ = ShowSubcommandHelp(context) + } else { + _ = ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowSubcommandHelp(context) + return cerr + } + + if a.After != nil && !context.shellComplete { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + a.handleExitCoder(context, err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil && !context.shellComplete { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = HandleAction(a.Action, context) + + a.handleExitCoder(context, err) + return err +} + +// Command returns the named command on App. 
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + var ret []Command + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) errWriter() io.Writer { + // When the app ErrWriter is nil use the package level one. + if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} + +func (a *App) handleExitCoder(context *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(context, err) + } else { + HandleExitCoder(err) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! +func HandleAction(action interface{}, context *Context) (err error) { + switch a := action.(type) { + case ActionFunc: + return a(context) + case func(*Context) error: + return a(context) + case func(*Context): // deprecated function signature + a(context) + return nil + } + + return errInvalidActionType +} diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go new file mode 100644 index 000000000..bf3c73c55 --- /dev/null +++ b/vendor/github.com/urfave/cli/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. +type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category. 
+func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go new file mode 100644 index 000000000..4bd250839 --- /dev/null +++ b/vendor/github.com/urfave/cli/cli.go @@ -0,0 +1,22 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// cli.NewApp().Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := cli.NewApp() +// app.Name = "greet" +// app.Usage = "say a greeting" +// app.Action = func(c *cli.Context) error { +// println("Greetings") +// return nil +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go new file mode 100644 index 000000000..6c2f9cad5 --- /dev/null +++ b/vendor/github.com/urfave/cli/command.go @@ -0,0 +1,386 @@ +package cli + +import ( + "flag" + "fmt" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // short name of the command. Typically one character (deprecated, use `Aliases`) + ShortName string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands Commands + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Skip argument reordering which attempts to move flags before arguments, + // but only works if all flags appear after all arguments. 
This behavior was + // removed n version 2 since it only works under specific conditions so we + // backport here by exposing it as an option for compatibility. + SkipArgReorder bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string +} + +type CommandsByName []Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) (err error) { + if !c.SkipFlagParsing { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + } + + if !c.HideHelp && (HelpFlag != BoolFlag{}) { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true + } + + set, err := c.parseFlags(ctx.Args().Tail(), ctx.shellComplete) + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(context, err, false) + context.App.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + _, _ = fmt.Fprintln(context.App.Writer) + _ = ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + cerr := checkRequiredFlags(c.Flags, context) + if cerr != nil { + _ = ShowCommandHelp(context, c.Name) + return cerr + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + context.App.handleExitCoder(context, err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + context.App.handleExitCoder(context, err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + err = HandleAction(c.Action, context) + + if err != nil { + context.App.handleExitCoder(context, err) + } + return err +} + +func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { + if c.SkipFlagParsing { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + return set, set.Parse(append([]string{"--"}, args...)) + } + + if !c.SkipArgReorder { + args = reorderArgs(c.Flags, args) + } + + set, err := c.newFlagSet() + if err != nil { + return 
nil, err + } + + err = parseIter(set, c, args, shellComplete) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +// reorderArgs moves all flags (via reorderedArgs) before the rest of +// the arguments (remainingArgs) as this is what flag expects. +func reorderArgs(commandFlags []Flag, args []string) []string { + var remainingArgs, reorderedArgs []string + + nextIndexMayContainValue := false + for i, arg := range args { + + // if we're expecting an option-value, check if this arg is a value, in + // which case it should be re-ordered next to its associated flag + if nextIndexMayContainValue && !argIsFlag(commandFlags, arg) { + nextIndexMayContainValue = false + reorderedArgs = append(reorderedArgs, arg) + } else if arg == "--" { + // don't reorder any args after the -- delimiter As described in the POSIX spec: + // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap12.html#tag_12_02 + // > Guideline 10: + // > The first -- argument that is not an option-argument should be accepted + // > as a delimiter indicating the end of options. Any following arguments + // > should be treated as operands, even if they begin with the '-' character. + + // make sure the "--" delimiter itself is at the start + remainingArgs = append([]string{"--"}, remainingArgs...) + remainingArgs = append(remainingArgs, args[i+1:]...) + break + // checks if this is an arg that should be re-ordered + } else if argIsFlag(commandFlags, arg) { + // we have determined that this is a flag that we should re-order + reorderedArgs = append(reorderedArgs, arg) + // if this arg does not contain a "=", then the next index may contain the value for this flag + nextIndexMayContainValue = !strings.Contains(arg, "=") + + // simply append any remaining args + } else { + remainingArgs = append(remainingArgs, arg) + } + } + + return append(reorderedArgs, remainingArgs...) +} + +// argIsFlag checks if an arg is one of our command flags +func argIsFlag(commandFlags []Flag, arg string) bool { + if arg == "-" || arg == "--" { + // `-` is never a flag + // `--` is an option-value when following a flag, and a delimiter indicating the end of options in other cases. + return false + } + // flags always start with a - + if !strings.HasPrefix(arg, "-") { + return false + } + // this line turns `--flag` into `flag` + if strings.HasPrefix(arg, "--") { + arg = strings.Replace(arg, "-", "", 2) + } + // this line turns `-flag` into `flag` + if strings.HasPrefix(arg, "-") { + arg = strings.Replace(arg, "-", "", 1) + } + // this line turns `flag=value` into `flag` + arg = strings.Split(arg, "=")[0] + // look through all the flags, to see if the `arg` is one of our flags + for _, flag := range commandFlags { + for _, key := range strings.Split(flag.GetName(), ",") { + key := strings.TrimSpace(key) + if key == arg { + return true + } + } + } + // return false if this arg was not one of our flags + return false +} + +// Names returns the names including short names and aliases. +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) 
+} + +// HasName returns true if Command.Name or Command.ShortName matches given name +func (c Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c Command) startApp(ctx *Context) error { + app := NewApp() + app.Metadata = ctx.App.Metadata + app.ExitErrHandler = ctx.App.ExitErrHandler + // set the name and usage + app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + app.Usage = c.Usage + app.Description = c.Description + app.ArgsUsage = c.ArgsUsage + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + app.CustomAppHelpTemplate = c.CustomHelpTemplate + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + app.ErrWriter = ctx.App.ErrWriter + app.UseShortOptionHandling = ctx.App.UseShortOptionHandling + + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + app.OnUsageError = c.OnUsageError + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go new file mode 100644 index 000000000..3adf37e7b --- /dev/null +++ b/vendor/github.com/urfave/cli/context.go @@ -0,0 +1,348 @@ +package cli + +import ( + "errors" + "flag" + "fmt" + "os" + "reflect" + "strings" + "syscall" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific Args and +// parsed command-line options. +type Context struct { + App *App + Command Command + shellComplete bool + flagSet *flag.FlagSet + setFlags map[string]bool + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + + if parentCtx != nil { + c.shellComplete = parentCtx.shellComplete + } + + return c +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
+func (c *Context) Set(name, value string) error { + c.setFlags = nil + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + globalContext(c).setFlags = nil + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + + c.flagSet.VisitAll(func(f *flag.Flag) { + if _, ok := c.setFlags[f.Name]; ok { + return + } + c.setFlags[f.Name] = false + }) + + // XXX hack to support IsSet for flags with EnvVar + // + // There isn't an easy way to do this with the current implementation since + // whether a flag was set via an environment variable is very difficult to + // determine here. Instead, we intend to introduce a backwards incompatible + // change in version 2 to add `IsSet` to the Flag interface to push the + // responsibility closer to where the information required to determine + // whether a flag is set by non-standard means such as environment + // variables is available. + // + // See https://github.com/urfave/cli/issues/294 for additional discussion + flags := c.Command.Flags + if c.Command.Name == "" { // cannot == Command{} since it contains slice types + if c.App != nil { + flags = c.App.Flags + } + } + for _, f := range flags { + eachName(f.GetName(), func(name string) { + if isSet, ok := c.setFlags[name]; isSet || !ok { + // Check if a flag is set + if isSet { + // If the flag is set, also set its other aliases + eachName(f.GetName(), func(name string) { + c.setFlags[name] = true + }) + } + + return + } + + val := reflect.ValueOf(f) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + filePathValue := val.FieldByName("FilePath") + if filePathValue.IsValid() { + eachName(filePathValue.String(), func(filePath string) { + if _, err := os.Stat(filePath); err == nil { + c.setFlags[name] = true + return + } + }) + } + + envVarValue := val.FieldByName("EnvVar") + if envVarValue.IsValid() { + eachName(envVarValue.String(), func(envVar string) { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + c.setFlags[name] = true + return + } + }) + } + }) + } + } + + return c.setFlags[name] +} + +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + + for ; ctx != nil; ctx = ctx.parentContext { + if ctx.IsSet(name) { + return true + } + } + return false +} + +// FlagNames returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, f := range c.Command.Flags { + name := strings.Split(f.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// GlobalFlagNames returns a slice of global flag names used by the app. 
+func (c *Context) GlobalFlagNames() (names []string) { + for _, f := range c.App.Flags { + name := strings.Split(f.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Parent returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +// value returns the value of the flag coressponding to `name` +func (c *Context) value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args contains apps console arguments +type Args []string + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// NArg returns the number of the command line arguments. +func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// First returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Tail returns the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Present checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swap swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + _ = set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.GetName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} + +type requiredFlagsErr interface { + error + getMissingFlags() []string +} + +type errRequiredFlags struct { + missingFlags []string +} + +func (e *errRequiredFlags) Error() string { + numberOfMissingFlags := len(e.missingFlags) + if numberOfMissingFlags == 1 { + return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) + } + joinedMissingFlags := strings.Join(e.missingFlags, ", ") + return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) +} + +func (e *errRequiredFlags) getMissingFlags() []string { + return e.missingFlags +} + +func 
checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { + var missingFlags []string + for _, f := range flags { + if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { + var flagPresent bool + var flagName string + for _, key := range strings.Split(f.GetName(), ",") { + key = strings.TrimSpace(key) + if len(key) > 1 { + flagName = key + } + + if context.IsSet(key) { + flagPresent = true + } + } + + if !flagPresent && flagName != "" { + missingFlags = append(missingFlags, flagName) + } + } + } + + if len(missingFlags) != 0 { + return &errRequiredFlags{missingFlags: missingFlags} + } + + return nil +} diff --git a/vendor/github.com/urfave/cli/docs.go b/vendor/github.com/urfave/cli/docs.go new file mode 100644 index 000000000..725fa7ff2 --- /dev/null +++ b/vendor/github.com/urfave/cli/docs.go @@ -0,0 +1,151 @@ +//go:build !urfave_cli_no_docs +// +build !urfave_cli_no_docs + +package cli + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + "text/template" + + "github.com/cpuguy83/go-md2man/v2/md2man" +) + +// ToMarkdown creates a markdown string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMarkdown() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +// ToMan creates a man page string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMan() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + man := md2man.Render(w.Bytes()) + return string(man), nil +} + +type cliTemplate struct { + App *App + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.Flags), + SynopsisArgs: prepareArgsSynopsis(a.Flags), + }) +} + +func prepareCommands(commands []Command, level int) []string { + coms := []string{} + for i := range commands { + command := &commands[i] + if command.Hidden { + continue + } + usage := "" + if command.Usage != "" { + usage = command.Usage + } + + prepared := fmt.Sprintf("%s %s\n\n%s\n", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + ) + + flags := prepareArgsWithValues(command.Flags) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + for _, s := range strings.Split(flag.GetName(), ",") { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { + modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += 
fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + value := flag.GetValue() + if value != "" { + description += " (default: " + value + ")" + } + return ": " + description +} diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go new file mode 100644 index 000000000..562b2953c --- /dev/null +++ b/vendor/github.com/urfave/cli/errors.go @@ -0,0 +1,115 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implements the error interface. +func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *ExitError +func NewExitError(message interface{}, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice and calls OsExiter with the last exit code. 
+func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/fish.go b/vendor/github.com/urfave/cli/fish.go new file mode 100644 index 000000000..cf183af61 --- /dev/null +++ b/vendor/github.com/urfave/cli/fish.go @@ -0,0 +1,194 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for i := range commands { + command := &commands[i] + + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) 
+ completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.Flags, command.Names())..., + ) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range strings.Split(flag.GetName(), ",") { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case GenericFlag: + if f.TakesFile { + return + } + case StringFlag: + if f.TakesFile { + return + } + case StringSliceFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + "__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go new file mode 100644 index 000000000..5b7ae6c3f --- /dev/null +++ b/vendor/github.com/urfave/cli/flag.go @@ -0,0 +1,348 @@ +package cli + +import ( + "flag" + "fmt" + "io/ioutil" + "reflect" + "runtime" + "strconv" + "strings" + "syscall" +) + +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = BoolFlag{ + Name: "version, v", + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands +// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand +// unless HideHelp is set to true) +var HelpFlag Flag = BoolFlag{ + Name: "help, h", + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// FlagNamePrefixer converts a full flag name and its placeholder into the help +// message flag prefix. This is used by the default FlagStringer. +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + +// FlagEnvHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. 
+var FlagEnvHinter FlagEnvHintFunc = withEnvHint + +// FlagFileHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagFileHinter FlagFileHintFunc = withFileHint + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + return lexicographicLess(f[i].GetName(), f[j].GetName()) +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + GetName() string +} + +// RequiredFlag is an interface that allows us to mark flags as required +// it allows flags required flags to be backwards compatible with the Flag interface +type RequiredFlag interface { + Flag + + IsRequired() bool +} + +// DocGenerationFlag is an interface that allows documentation generation for the flag +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string +} + +// errorableFlag is an interface that allows us to return errors during apply +// it allows flags defined in this library to return errors in a fashion backwards compatible +// TODO remove in v2 and modify the existing Flag interface to return errors +type errorableFlag interface { + Flag + + ApplyWithError(*flag.FlagSet) error +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + //TODO remove in v2 when errorableFlag is removed + if ef, ok := f.(errorableFlag); ok { + if err := ef.ApplyWithError(set); err != nil { + return nil, err + } + } else { + f.Apply(set) + } + } + set.SetOutput(ioutil.Discard) + return set, nil +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +func visibleFlags(fl []Flag) []Flag { + var visible []Flag + for _, f := range fl { + field := flagValue(f).FieldByName("Hidden") + if !field.IsValid() || !field.Bool() { + visible = append(visible, f) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. 
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(parts)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVar, str string) string { + envText := "" + if envVar != "" { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + envText = " [" + prefix + strings.Join(strings.Split(envVar, ","), sep) + suffix + "]" + } + return str + envText +} + +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag)), + ), + ) + case Int64SliceFlag: + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag)), + ), + ) + case StringSliceFlag: + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag)), + ), + ) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + + if val := fv.FieldByName("Value"); val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(usage + defaultValueString) + + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder)+"\t"+usageWithDefault, + ), + ) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.Itoa(i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + var defaultVals []string + if f.Value 
!= nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, strconv.Quote(s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(usage + defaultVal) + return FlagNamePrefixer(name, placeholder) + "\t" + usageWithDefault +} + +func flagFromFileEnv(filePath, envName string) (val string, ok bool) { + for _, envVar := range strings.Split(envName, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + return envVal, true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if fileVar != "" { + if data, err := ioutil.ReadFile(fileVar); err == nil { + return string(data), true + } + } + } + return "", false +} diff --git a/vendor/github.com/urfave/cli/flag_bool.go b/vendor/github.com/urfave/cli/flag_bool.go new file mode 100644 index 000000000..2499b0b52 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_bool.go @@ -0,0 +1,109 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f BoolFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f BoolFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f BoolFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f BoolFlag) GetValue() string { + return "" +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { + val := false + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if envVal == "" { + val = false + } else { + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + val = envValBool + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/flag_bool_t.go b/vendor/github.com/urfave/cli/flag_bool_t.go new file mode 100644 index 000000000..cd0888fa2 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_bool_t.go @@ -0,0 +1,110 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f BoolTFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f BoolTFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f BoolTFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f BoolTFlag) GetValue() string { + return "" +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolTFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + val := true + + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if envVal == "" { + val = false + } else { + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + val = envValBool + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/flag_duration.go b/vendor/github.com/urfave/cli/flag_duration.go new file mode 100644 index 000000000..df4ade589 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_duration.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f DurationFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f DurationFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f DurationFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f DurationFlag) GetValue() string { + return f.Value.String() +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f DurationFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValDuration, err := time.ParseDuration(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValDuration + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) + + return nil +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_float64.go b/vendor/github.com/urfave/cli/flag_float64.go new file mode 100644 index 000000000..65398d3b5 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_float64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Float64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Float64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Float64Flag) GetValue() string { + return fmt.Sprintf("%f", f.Value) +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Float64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValFloat, err := strconv.ParseFloat(envVal, 10) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValFloat + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) + + return nil +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_generic.go b/vendor/github.com/urfave/cli/flag_generic.go new file mode 100644 index 000000000..c43dae7d0 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_generic.go @@ -0,0 +1,110 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f GenericFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f GenericFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +// Ignores parsing errors +func (f GenericFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { + val := f.Value + if fileEnvVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if err := val.Set(fileEnvVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", fileEnvVal, f.Name, err) + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/flag_int.go b/vendor/github.com/urfave/cli/flag_int.go new file mode 100644 index 000000000..bae32e281 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f IntFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_int64.go b/vendor/github.com/urfave/cli/flag_int64.go new file mode 100644 index 000000000..aaafbe9d6 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Int64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) + + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_int64_slice.go b/vendor/github.com/urfave/cli/flag_int64_slice.go new file mode 100644 index 000000000..80772e7c2 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int64_slice.go @@ -0,0 +1,199 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + slice := make([]string, len(*f)) + for i, v := range *f { + slice[i] = strconv.FormatInt(v, 10) + } + + return strings.Join(slice, ",") +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *Int64Slice) Get() interface{} { + return *f +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string 
if the flag takes no value at all. +func (f Int64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + value, ok := f.Value.(*Int64Slice) + if !ok { + return nil + } + + // extract the slice from asserted value + parsed := value.Value() + + // extract default value from the flag + var defaultVal []int64 + for _, v := range strings.Split(f.DefValue, ",") { + if v != "" { + int64Value, err := strconv.ParseInt(v, 10, 64) + if err != nil { + panic(err) + } + defaultVal = append(defaultVal, int64Value) + } + } + // if the current value is not equal to the default value + // remove the default values from the flag + if !isInt64SliceEqual(parsed, defaultVal) { + for _, v := range defaultVal { + parsed = removeFromInt64Slice(parsed, v) + } + } + return parsed + } + return nil +} + +func removeFromInt64Slice(slice []int64, val int64) []int64 { + for i, v := range slice { + if v == val { + ret := append([]int64{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) + return ret + } + } + return slice +} + +func isInt64SliceEqual(newValue, defaultValue []int64) bool { + // If one is nil, the other must also be nil. 
+ if (newValue == nil) != (defaultValue == nil) { + return false + } + + if len(newValue) != len(defaultValue) { + return false + } + + for i, v := range newValue { + if v != defaultValue[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/urfave/cli/flag_int_slice.go b/vendor/github.com/urfave/cli/flag_int_slice.go new file mode 100644 index 000000000..af6d582de --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int_slice.go @@ -0,0 +1,198 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + slice := make([]string, len(*f)) + for i, v := range *f { + slice[i] = strconv.Itoa(v) + } + + return strings.Join(slice, ",") +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f IntSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f IntSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + value, ok := f.Value.(*IntSlice) + if !ok { + return nil + } + // extract the slice from asserted value + slice := value.Value() + + // extract default value from the flag + var defaultVal []int + for _, v := range strings.Split(f.DefValue, ",") { + if v != "" { + intValue, err := strconv.Atoi(v) + if err != nil { + panic(err) + } + defaultVal = append(defaultVal, intValue) + } + } + // if the current value is not equal to the default value + // remove the default values from the flag + if !isIntSliceEqual(slice, defaultVal) { + for _, v := range defaultVal { + slice = removeFromIntSlice(slice, v) + } + } + return slice + } + return nil +} + +func removeFromIntSlice(slice []int, val int) []int { + for i, v := range slice { + if v == val { + ret := append([]int{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) + return ret + } + } + return slice +} + +func isIntSliceEqual(newValue, defaultValue []int) bool { + // If one is nil, the other must also be nil. 
+ if (newValue == nil) != (defaultValue == nil) { + return false + } + + if len(newValue) != len(defaultValue) { + return false + } + + for i, v := range newValue { + if v != defaultValue[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/urfave/cli/flag_string.go b/vendor/github.com/urfave/cli/flag_string.go new file mode 100644 index 000000000..9f29da40b --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_string.go @@ -0,0 +1,98 @@ +package cli + +import "flag" + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f StringFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f StringFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f StringFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + f.Value = envVal + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/flag_string_slice.go b/vendor/github.com/urfave/cli/flag_string_slice.go new file mode 100644 index 000000000..a7c71e9dc --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_string_slice.go @@ -0,0 +1,184 @@ +package cli + +import ( + "flag" + "fmt" + "strings" +) + +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return strings.Join(*f, ",") +} + +// Value returns the slice of strings set by this flag +func (f 
*StringSlice) Value() []string { + return *f +} + +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f StringSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f StringSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + value, ok := f.Value.(*StringSlice) + if !ok { + return nil + } + // extract the slice from asserted value + slice := value.Value() + + // extract default value from the flag + var defaultVal []string + for _, v := range strings.Split(f.DefValue, ",") { + defaultVal = append(defaultVal, v) + } + + // if the current value is not equal to the default value + // remove the default values from the flag + if !isStringSliceEqual(slice, defaultVal) { + for _, v := range defaultVal { + slice = removeFromStringSlice(slice, v) + } + } + return slice + } + return nil +} + +func removeFromStringSlice(slice []string, val string) []string { + for i, v := range slice { + if v == val { + ret := append([]string{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) 
+ return ret + } + } + return slice +} + +func isStringSliceEqual(newValue, defaultValue []string) bool { + // If one is nil, the other must also be nil. + if (newValue == nil) != (defaultValue == nil) { + return false + } + + if len(newValue) != len(defaultValue) { + return false + } + + for i, v := range newValue { + if v != defaultValue[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/urfave/cli/flag_uint.go b/vendor/github.com/urfave/cli/flag_uint.go new file mode 100644 index 000000000..d6a04f408 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_uint.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f UintFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f UintFlag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_uint64.go b/vendor/github.com/urfave/cli/flag_uint64.go new file mode 100644 index 000000000..ea6493a8b --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_uint64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Uint64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Uint64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Uint64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go new file mode 100644 index 000000000..0036b1130 --- /dev/null +++ b/vendor/github.com/urfave/cli/funcs.go @@ -0,0 +1,44 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// ExitErrHandlerFunc is executed if provided in order to handle ExitError values +// returned by Actions and Before/After functions. +type ExitErrHandlerFunc func(context *Context, err error) + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string + +// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix +// text for a flag's full name. +type FlagNamePrefixFunc func(fullName, placeholder string) string + +// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help +// with the environment variable details. 
+type FlagEnvHintFunc func(envVar, str string) string + +// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help +// with the file path details. +type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go new file mode 100644 index 000000000..2280e338e --- /dev/null +++ b/vendor/github.com/urfave/cli/help.go @@ -0,0 +1,363 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + "unicode/utf8" +) + +var helpCommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + _ = ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. +// +// If custom logic for printing help is required, this function can be +// overridden. If the ExtraInfo field is defined on an App, this function +// should not be modified, as HelpPrinterCustom will be used directly in order +// to capture the extra information. +var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is a function that writes the help output. It is used as +// the default implementation of HelpPrinter, and may be called directly if +// the ExtraInfo field is set on an App. +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + _ = ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. 
+func ShowAppHelp(c *Context) error { + template := c.App.CustomAppHelpTemplate + if template == "" { + template = AppHelpTemplate + } + + if c.App.ExtraInfo == nil { + HelpPrinter(c.App.Writer, template, c.App) + return nil + } + + customAppData := func() map[string]interface{} { + return map[string]interface{}{ + "ExtraInfo": c.App.ExtraInfo, + } + } + HelpPrinterCustom(c.App.Writer, template, c.App, customAppData()) + + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + DefaultCompleteWithFlags(nil)(c) +} + +func printCommandSuggestions(commands []Command, writer io.Writer) { + for _, command := range commands { + if command.Hidden { + continue + } + if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) + } + } else { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s\n", name) + } + } + } +} + +func cliArgContains(flagName string) bool { + for _, name := range strings.Split(flagName, ",") { + name = strings.TrimSpace(name) + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 + } + flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + for _, a := range os.Args { + if a == flag { + return true + } + } + } + return false +} + +func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { + cur := strings.TrimPrefix(lastArg, "-") + cur = strings.TrimPrefix(cur, "-") + for _, flag := range flags { + if bflag, ok := flag.(BoolFlag); ok && bflag.Hidden { + continue + } + for _, name := range strings.Split(flag.GetName(), ",") { + name = strings.TrimSpace(name) + // this will get total count utf8 letters in flag name + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 // resuse this count to generate single - or -- in flag completion + } + // if flag name has more than one utf8 letter and last argument in cli has -- prefix then + // skip flag completion for short flags example -v or -x + if strings.HasPrefix(lastArg, "--") && count == 1 { + continue + } + // match if last argument matches this flag and it is not repeated + if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(flag.GetName()) { + flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + _, _ = fmt.Fprintln(writer, flagCompletion) + } + } + } +} + +func DefaultCompleteWithFlags(cmd *Command) func(c *Context) { + return func(c *Context) { + if len(os.Args) > 2 { + lastArg := os.Args[len(os.Args)-2] + if strings.HasPrefix(lastArg, "-") { + printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer) + if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer) + } + return + } + } + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, c.App.Writer) + } else { + printCommandSuggestions(c.App.Commands, c.App.Writer) + } + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + _ = ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + templ := c.CustomHelpTemplate + if templ == "" { + templ = 
CommandHelpTemplate + } + + HelpPrinter(ctx.App.Writer, templ, c) + + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } + } + +} + +// printHelpCustom is the default implementation of HelpPrinterCustom. +// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. +func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + for key, value := range customFuncs { + funcMap[key] = value + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + _ = w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + HelpPrinterCustom(out, templ, data, nil) +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.GetName() != "" { + eachName(VersionFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.GetName() != "" { + eachName(HelpFlag.GetName(), func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + _ = ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + _ = ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--"+BashCompletionFlag.GetName() { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(c *Context) bool { + if !c.shellComplete { + return false + } + + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true +} + +func checkCommandCompletions(c *Context, name string) bool { + if !c.shellComplete { + return false + } + + ShowCommandCompletions(c, name) + return true +} diff --git a/vendor/github.com/urfave/cli/parse.go b/vendor/github.com/urfave/cli/parse.go new file mode 100644 index 000000000..7df17296a --- /dev/null +++ b/vendor/github.com/urfave/cli/parse.go @@ -0,0 +1,94 @@ +package cli + +import ( + "flag" + "strings" +) + +type iterativeParser interface { + newFlagSet() (*flag.FlagSet, error) + useShortOptionHandling() bool +} + +// To enable short-option handling (e.g., "-it" vs "-i -t") we have to +// iteratively catch parsing errors. This way we achieve LR parsing without +// transforming any arguments. Otherwise, there is no way we can discriminate +// combined short options from common arguments that should be left untouched. +// Pass `shellComplete` to continue parsing options on failure during shell +// completion when, the user-supplied options may be incomplete. +func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { + for { + err := set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + if shellComplete { + return nil + } + return err + } + + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -") + if errStr == trimmed { + return err + } + + // regenerate the initial args with the split short opts + argsWereSplit := false + for i, arg := range args { + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { + continue + } + + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) + if len(shortOpts) == 1 { + return err + } + + // swap current argument with the split version + args = append(args[:i], append(shortOpts, args[i+1:]...)...) 
+ argsWereSplit = true + break + } + + // This should be an impossible to reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + + // Since custom parsing failed, replace the flag set before retrying + newSet, err := ip.newFlagSet() + if err != nil { + return err + } + *set = *newSet + } +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/sort.go b/vendor/github.com/urfave/cli/sort.go new file mode 100644 index 000000000..23d1c2f77 --- /dev/null +++ b/vendor/github.com/urfave/cli/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. +func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/template.go b/vendor/github.com/urfave/cli/template.go new file mode 100644 index 000000000..c631fb97d --- /dev/null +++ b/vendor/github.com/urfave/cli/template.go @@ -0,0 +1,121 @@ +package cli + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var MarkdownDocTemplate = `% {{ .App.Name }}(8) {{ .App.Description }} + +% {{ .App.Author }} + +# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.UsageText }} +# DESCRIPTION + +{{ .App.UsageText }} +{{ end }} +**Usage**: + +` + "```" + ` +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] +` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` diff --git a/vendor/go.mozilla.org/sops/.gitignore b/vendor/go.mozilla.org/sops/.gitignore deleted file mode 100644 index ee580fc0f..000000000 --- a/vendor/go.mozilla.org/sops/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -target -Cargo.lock -vendor/ diff --git a/vendor/go.mozilla.org/sops/.sops.yaml b/vendor/go.mozilla.org/sops/.sops.yaml deleted file mode 100644 index d715d2382..000000000 --- a/vendor/go.mozilla.org/sops/.sops.yaml +++ /dev/null @@ -1,2 +0,0 @@ -creation_rules: - - pgp: 1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A diff --git a/vendor/go.mozilla.org/sops/.travis.yml b/vendor/go.mozilla.org/sops/.travis.yml deleted file mode 100644 index 3e9914d23..000000000 --- a/vendor/go.mozilla.org/sops/.travis.yml +++ /dev/null @@ -1,54 +0,0 @@ -language: go -go: 1.12 -go_import_path: go.mozilla.org/sops/ - -env: - - VAULT_VERSION=1.1.3 VAULT_TOKEN=root VAULT_ADDR='http://127.0.0.1:8200' - -addons: - apt: - packages: - - rpm - - ruby - - python3 - - unzip - -before_install: - - gem install fpm || sudo gem install fpm - - curl https://sh.rustup.rs -sSf | sh -s -- -y - - source ~/.cargo/env - - curl -O "https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip" && sudo unzip 
vault_${VAULT_VERSION}_linux_amd64.zip -d /usr/local/bin/ - -before_script: - - vault server -dev -dev-root-token-id="$VAULT_TOKEN" & - - vault secrets enable -version=1 kv - -script: - - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then make; fi' - - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then make origin-build; fi' - - bash <(curl -s https://codecov.io/bash) - -before_deploy: - - mkdir dist - - make deb-pkg rpm-pkg - - mv *.deb *.rpm dist/ - - GOOS=darwin CGO_ENABLED=0 GO111MODULE=on go build -mod vendor -o dist/sops-${TRAVIS_TAG}.darwin go.mozilla.org/sops/cmd/sops - - GOOS=windows CGO_ENABLED=0 GO111MODULE=on go build -mod vendor -o dist/sops-${TRAVIS_TAG}.exe go.mozilla.org/sops/cmd/sops - - GOOS=linux CGO_ENABLED=0 GO111MODULE=on go build -mod vendor -o dist/sops-${TRAVIS_TAG}.linux go.mozilla.org/sops/cmd/sops - - | - if [ ! -z "$TRAVIS_TAG" ]; then - version="$(grep '^const Version' version/version.go |cut -d '"' -f 2)" - if [ "$version" != "$TRAVIS_TAG" ]; then - echo "Git tag $TRAVIS_TAG does not match version $version, update the source!" - exit 1 - fi - fi - -deploy: - provider: releases - api_key: "${GITHUB_OAUTH_TOKEN}" - file_glob: true - file: dist/* - skip_cleanup: true - on: - tags: true diff --git a/vendor/go.mozilla.org/sops/CHANGELOG.rst b/vendor/go.mozilla.org/sops/CHANGELOG.rst deleted file mode 100644 index 34319fd9d..000000000 --- a/vendor/go.mozilla.org/sops/CHANGELOG.rst +++ /dev/null @@ -1,196 +0,0 @@ -Changelog -========= - -3.4.0 ------ -Features: - - * `sops publish`, a new command for publishing sops encrypted secrets to S3, GCS, or Hashicorp Vault - * Support for multiple Azure authentication mechanisms - * Azure Keyvault support to the sops config file - * `encrypted_regex` option to the sops config file - -Bug fixes: - - * Return non-zero exit code for invalid CLI flags - * Broken path handling for sops editing on Windows - * `go lint/fmt` violations - * Check for pgp fingerprint before slicing it - -Project changes: - - * Build container using golang 1.12 - * Switch to using go modules - * Hashicorp Vault server in Travis CI build - * Mozilla Publice License file to repo - * Replaced expiring test gpg keys - -3.3.1 ------ - -Bug fixes: - -* Make sure the pgp key fingerprint is longer than 16 characters before - slicing it. (#463) -* Allow for `--set` value to be a string. (#461) - -Project changes: - -* Using `develop` as a staging branch to create releases off of. What - is in `master` is now the current stable release. -* Upgrade to using Go 1.12 to build sops -* Updated all vendored packages - -3.3.0 ------ - -New features: - -* Multi-document support for YAML files -* Support referencing AWS KMS keys by their alias -* Support for INI files -* Support for AWS CLI profiles -* Comment support in .env files -* Added vi to the list of known editors -* Added a way to specify the GPG key server to use through the - SOPS_GPG_KEYSERVER environment variable - -Bug fixes: - -* Now uses $HOME instead of ~ (which didn't work) to find the GPG home -* Fix panic when vim was not available as an editor, but other - alternative editors were -* Fix issue with AWS KMS Encryption Contexts (#445) with more than one - context value failing to decrypt intermittently. Includes an - automatic fix for old files affected by this issue. 
- -Project infrastructure changes: - -* Added integration tests for AWS KMS -* Added Code of Conduct - - -3.2.0 ------ - -* Added --output flag to write output a file directly instead of - through stdout -* Added support for dotenv files - -3.1.1 ------ - -* Fix incorrect version number from previous release - -3.1.0 ------ - -* Add support for Azure Key Service - -* Fix bug that prevented JSON escapes in input files from working - -3.0.5 ------ - -* Prevent files from being encrypted twice - -* Fix empty comments not being decrypted correctly - -* If keyservicecmd returns an error, log it. - -* Initial sops workspace auditing support (still wip) - -* Refactor Store interface to reflect operations SOPS performs - -3.0.3 ----- - -* --set now works with nested data structures and not just simple - values - -* Changed default log level to warn instead of info - -* Avoid creating empty files when using the editor mode to create new - files and not making any changes to the example files - -* Output unformatted strings when using --extract instead of encoding - them to yaml - -* Allow forcing binary input and output types from command line flags - -* Deprecate filename_regex in favor of path_regex. filename_regex had - a bug and matched on the whole file path, when it should have only - matched on the file name. path_regex on the other hand is documented - to match on the whole file path. - -* Add an encrypted-suffix option, the exact opposite of - unencrypted-suffix - -* Allow specifying unencrypted_suffix and encrypted_suffix rules in - the .sops.yaml configuration file - -* Introduce key service flag optionally prompting users on - encryption/decryption - -3.0.1 ------ - -* Don't consider io.EOF returned by Decoder.Token as error - -* add IsBinary: true to FileHints when encoding with crypto/openpgp - -* some improvements to error messages - -3.0.0 ------ - -* Shamir secret sharing scheme support allows SOPS to require multiple master - keys to access a data key and decrypt a file. See `sops groups -help` and the - documentation in README. - -* Keyservice to forward access to a local master key on a socket, similar to - gpg-agent. See `sops keyservice --help` and the documentation in README. - -* Encrypt comments by default - -* Support for Google Compute Platform KMS - -* Refactor of the store logic to separate the internal representation SOPS - has of files from the external representation used in JSON and YAML files - -* Reencoding of versions as string on sops 1.X files. - **WARNING** this change breaks backward compatibility. - SOPS shows an error message with instructions on how to solve - this if it happens. 
- -* Added command to reconfigure the keys used to encrypt/decrypt a file based on the .sops.yaml config file - -* Retrieve missing PGP keys from gpg.mozilla.org - -* Improved error messages for errors when decrypting files - - -2.0.0 ------ - -* [major] rewrite in Go - -1.14 ----- - -* [medium] Support AWS KMS Encryption Contexts -* [minor] Support insertion in encrypted documents via --set -* [minor] Read location of gpg binary from SOPS_GPG_EXEC env variables - -1.13 ----- - -* [minor] handle $EDITOR variable with parameters - -1.12 ----- - -* [minor] make sure filename_regex gets applied to file names, not paths -* [minor] move check of latest version under the -V flag -* [medium] fix handling of binary data to preserve file integrity -* [minor] try to use configuration when encrypting existing files diff --git a/vendor/go.mozilla.org/sops/CODE_OF_CONDUCT.md b/vendor/go.mozilla.org/sops/CODE_OF_CONDUCT.md deleted file mode 100644 index f0ef43f39..000000000 --- a/vendor/go.mozilla.org/sops/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,8 +0,0 @@ -# Community Participation Guidelines - -This repository is governed by Mozilla's code of conduct and etiquette guidelines. -For more details, please read the -[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). - -## How to Report -For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. diff --git a/vendor/go.mozilla.org/sops/CONTRIBUTING.md b/vendor/go.mozilla.org/sops/CONTRIBUTING.md deleted file mode 100644 index 07252d07f..000000000 --- a/vendor/go.mozilla.org/sops/CONTRIBUTING.md +++ /dev/null @@ -1,30 +0,0 @@ -# Contributing to SOPS - -Mozilla welcomes contributions from everyone. Here are a few guidelines and instructions if you're thinking of helping with the development of SOPS. - -# Getting started - -* Make sure you have Go 1.12 or greater installed. You can find information on how to install Go [here](https://golang.org/dl/) -* After following the [Go installation guide](https://golang.org/doc/install), run `go get go.mozilla.org/sops`. This will automatically clone this repository. -* Switch into sops's directory, which will be in `$GOPATH/src/go.mozilla.org/sops`. -* Run the tests with `make test`. They should all pass. -* Fork the project on GitHub. -* Add your fork to git's remotes: - * If you use SSH authentication: `git remote add git@github.com:/sops.git`. - * Otherwise: `git remote add https://github.com//sops.git`. -* **Switch to the `develop` branch: `git checkout develop`** -* Make any changes you want to sops, commit them, and push them to your fork. -* **Create a pull request against `develop`**, and a contributor will come by and review your code. They may ask for some changes, and hopefully your contribution will be merged to the `develop` branch! - -# Guidelines - -* Unless it's particularly hard, changes that fix a bug should have a regression test to make sure that the bug is not introduced again. -* New features and changes to existing features should be documented, and, if possible, tested. 
- -# Regenerating mocks - -If you encounter an error like `kms/mocks/KMSAPI.go:1607: cannot use (*KMSAPI)(nil) (type *KMSAPI) as type kmsiface.KMSAPI in assignment: *KMSAPI does not implement kmsiface.KMSAPI (missing ListResourceTags method)`, you need to regenerate mocks, probably because the interface was changed by a vendoring update. There is a make command to do this for you. Simply run `make mock`, and the new mocks will be automatically generated. - -# Communication - -If you need any help contributing to sops, several contributors are on the `#go` channel on [Mozilla's IRC server](https://wiki.mozilla.org/IRC). diff --git a/vendor/go.mozilla.org/sops/Dockerfile b/vendor/go.mozilla.org/sops/Dockerfile deleted file mode 100644 index a8aa7b57a..000000000 --- a/vendor/go.mozilla.org/sops/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM golang:1.12 - -COPY . /go/src/go.mozilla.org/sops -WORKDIR /go/src/go.mozilla.org/sops - -RUN CGO_ENABLED=1 make install -RUN apt-get update -RUN apt-get install -y vim python-pip emacs -RUN pip install awscli -ENV EDITOR vim diff --git a/vendor/go.mozilla.org/sops/Makefile b/vendor/go.mozilla.org/sops/Makefile deleted file mode 100644 index f619a6b25..000000000 --- a/vendor/go.mozilla.org/sops/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. - -PROJECT := go.mozilla.org/sops -GO := GO15VENDOREXPERIMENT=1 GO111MODULE=on GOPROXY=https://proxy.golang.org go -GOLINT := golint - -all: test vet generate install functional-tests -origin-build: test vet generate install functional-tests-all - -install: - $(GO) install go.mozilla.org/sops/cmd/sops - -tag: all - git tag -s $(TAGVER) -a -m "$(TAGMSG)" - -lint: - $(GOLINT) $(PROJECT) - -vendor: - $(GO) mod tidy - $(GO) mod vendor - -vet: - $(GO) vet $(PROJECT) - -test: vendor - gpg --import pgp/sops_functional_tests_key.asc 2>&1 1>/dev/null || exit 0 - ./test.sh - -showcoverage: test - $(GO) tool cover -html=coverage.out - -generate: keyservice/keyservice.pb.go - $(GO) generate - -%.pb.go: %.proto - protoc --go_out=plugins=grpc:. $< - -functional-tests: - $(GO) build -o functional-tests/sops go.mozilla.org/sops/cmd/sops - cd functional-tests && cargo test - -# Ignored tests are ones that require external services (e.g. AWS KMS) -# TODO: Once `--include-ignored` lands in rust stable, switch to that. -functional-tests-all: - $(GO) build -o functional-tests/sops go.mozilla.org/sops/cmd/sops - cd functional-tests && cargo test && cargo test -- --ignored - -deb-pkg: install - rm -rf tmppkg - mkdir -p tmppkg/usr/local/bin - cp $$GOPATH/bin/sops tmppkg/usr/local/bin/ - fpm -C tmppkg -n sops --license MPL2.0 --vendor mozilla \ - --description "Sops is an editor of encrypted files that supports YAML, JSON and BINARY formats and encrypts with AWS KMS and PGP." \ - -m "Julien Vehent " \ - --url https://go.mozilla.org/sops \ - --architecture x86_64 \ - -v "$$(git describe --abbrev=0 --tags)" \ - -s dir -t deb . - -rpm-pkg: install - rm -rf tmppkg - mkdir -p tmppkg/usr/local/bin - cp $$GOPATH/bin/sops tmppkg/usr/local/bin/ - fpm -C tmppkg -n sops --license MPL2.0 --vendor mozilla \ - --description "Sops is an editor of encrypted files that supports YAML, JSON and BINARY formats and encrypts with AWS KMS and PGP." 
\ - -m "Julien Vehent " \ - --url https://go.mozilla.org/sops \ - --architecture x86_64 \ - -v "$$(git describe --abbrev=0 --tags)" \ - -s dir -t rpm . - -dmg-pkg: install -ifneq ($(OS),darwin) - echo 'you must be on MacOS and set OS=darwin on the make command line to build an OSX package' -else - rm -rf tmppkg - mkdir -p tmppkg/usr/local/bin - cp $$GOPATH/bin/sops tmppkg/usr/local/bin/ - fpm -C tmppkg -n sops --license MPL2.0 --vendor mozilla \ - --description "Sops is an editor of encrypted files that supports YAML, JSON and BINARY formats and encrypts with AWS KMS and PGP." \ - -m "Julien Vehent " \ - --url https://go.mozilla.org/sops \ - --architecture x86_64 \ - -v "$$(git describe --abbrev=0 --tags)" \ - -s dir -t osxpkg \ - --osxpkg-identifier-prefix org.mozilla.sops \ - -p tmppkg/sops-$$(git describe --abbrev=0 --tags).pkg . - hdiutil makehybrid -hfs -hfs-volume-name "Mozilla Sops" \ - -o tmppkg/sops-$$(git describe --abbrev=0 --tags).dmg tmpdmg -endif - -download-index: - bash make_download_page.sh - -mock: - go get github.com/vektra/mockery/.../ - mockery -dir vendor/github.com/aws/aws-sdk-go/service/kms/kmsiface/ -name KMSAPI -output kms/mocks - -.PHONY: all test generate clean vendor functional-tests mock diff --git a/vendor/go.mozilla.org/sops/README.rst b/vendor/go.mozilla.org/sops/README.rst deleted file mode 100644 index 25efd0b95..000000000 --- a/vendor/go.mozilla.org/sops/README.rst +++ /dev/null @@ -1,1481 +0,0 @@ -SOPS: Secrets OPerationS -======================== - -**sops** is an editor of encrypted files that supports YAML, JSON, ENV, INI and BINARY -formats and encrypts with AWS KMS, GCP KMS, Azure Key Vault and PGP. -(`demo `_) - -.. image:: https://i.imgur.com/X0TM5NI.gif - ------------- - -.. image:: https://godoc.org/go.mozilla.org/sops?status.svg - :target: https://godoc.org/go.mozilla.org/sops - -.. image:: https://travis-ci.org/mozilla/sops.svg?branch=master - :target: https://travis-ci.org/mozilla/sops - -Download --------- - -Stable release -~~~~~~~~~~~~~~ -Binaries and packages of the latest stable release are available at `https://github.com/mozilla/sops/releases `_. - -Development branch -~~~~~~~~~~~~~~~~~~ -For the adventurous, unstable features are available in the `develop` branch, which you can install from source: - -.. code:: bash - - $ go get -u go.mozilla.org/sops/cmd/sops - $ cd $GOPATH/src/go.mozilla.org/sops/ - $ git checkout develop - $ make install - -(requires Go >= 1.12) - -If you don't have Go installed, set it up with: - -.. code:: bash - - $ {apt,yum,brew} install golang - $ echo 'export GOPATH=~/go' >> ~/.bashrc - $ source ~/.bashrc - $ mkdir $GOPATH - -Or whatever variation of the above fits your system and shell. - -To use **sops** as a library, take a look at the `decrypt package `_. - -**Questions?** ping "ulfr" and "autrilla" in ``#security`` on `irc.mozilla.org `_ -(use a web client like `mibbit `_ ). - -**What happened to Python Sops?** We rewrote Sops in Go to solve a number of -deployment issues, but the Python branch still exists under ``python-sops``. We -will keep maintaining it for a while, and you can still ``pip install sops``, -but we strongly recommend you use the Go version instead. - -.. sectnum:: -.. contents:: Table of Contents - -Usage ------ - -For a quick presentation of Sops, check out this Youtube tutorial: - -.. 
image:: https://img.youtube.com/vi/V2PRhxphH2w/0.jpg - :target: https://www.youtube.com/watch?v=V2PRhxphH2w - -If you're using AWS KMS, create one or multiple master keys in the IAM console -and export them, comma separated, in the **SOPS_KMS_ARN** env variable. It is -recommended to use at least two master keys in different regions. - -.. code:: bash - - export SOPS_KMS_ARN="arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e,arn:aws:kms:ap-southeast-1:656532927350:key/9006a8aa-0fa6-4c14-930e-a2dfb916de1d" - -Your AWS credentials must be present in ``~/.aws/credentials``. sops uses aws-sdk-go. - -.. code:: - - $ cat ~/.aws/credentials - [default] - aws_access_key_id = AKI..... - aws_secret_access_key = mw...... - -If you want to use PGP, export the fingerprints of the public keys, comma -separated, in the **SOPS_PGP_FP** env variable. - -.. code:: bash - - export SOPS_PGP_FP="85D77543B3D624B63CEA9E6DBC17301B491B3F21,E60892BB9BD89A69F759A1A0A3D652173B763E8F" - -Note: you can use both PGP and KMS simultaneously. - -Then simply call ``sops`` with a file path as argument. It will handle the -encryption/decryption transparently and open the cleartext file in an editor - -.. code:: shell - - $ sops mynewtestfile.yaml - mynewtestfile.yaml doesn't exist, creating it. - please wait while an encryption key is being generated and stored in a secure fashion - file written to mynewtestfile.yaml - -Editing will happen in whatever ``$EDITOR`` is set to, or, if it's not set, in vim. -Keep in mind that sops will wait for the editor to exit, and then try to reencrypt -the file. Some GUI editors (atom, sublime) spawn a child process and then exit -immediately. They usually have an option to wait for the main editor window to be -closed before exiting. See `#127 `_ for -more information. - -The resulting encrypted file looks like this: - -.. code:: yaml - - myapp1: ENC[AES256_GCM,data:Tr7o=,iv:1=,aad:No=,tag:k=] - app2: - db: - user: ENC[AES256_GCM,data:CwE4O1s=,iv:2k=,aad:o=,tag:w==] - password: ENC[AES256_GCM,data:p673w==,iv:YY=,aad:UQ=,tag:A=] - # private key for secret operations in app2 - key: |- - ENC[AES256_GCM,data:Ea3kL5O5U8=,iv:DM=,aad:FKA=,tag:EA==] - an_array: - - ENC[AES256_GCM,data:v8jQ=,iv:HBE=,aad:21c=,tag:gA==] - - ENC[AES256_GCM,data:X10=,iv:o8=,aad:CQ=,tag:Hw==] - - ENC[AES256_GCM,data:KN=,iv:160=,aad:fI4=,tag:tNw==] - sops: - kms: - - created_at: 1441570389.775376 - enc: CiC....Pm1Hm - arn: arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e - - created_at: 1441570391.925734 - enc: Ci...awNx - arn: arn:aws:kms:ap-southeast-1:656532927350:key/9006a8aa-0fa6-4c14-930e-a2dfb916de1d - pgp: - - fp: 85D77543B3D624B63CEA9E6DBC17301B491B3F21 - created_at: 1441570391.930042 - enc: | - -----BEGIN PGP MESSAGE----- - hQIMA0t4uZHfl9qgAQ//UvGAwGePyHuf2/zayWcloGaDs0MzI+zw6CmXvMRNPUsA - ...=oJgS - -----END PGP MESSAGE----- - -A copy of the encryption/decryption key is stored securely in each KMS and PGP -block. As long as one of the KMS or PGP method is still usable, you will be able -to access your data. - -To decrypt a file in a ``cat`` fashion, use the ``-d`` flag: - -.. code:: bash - - $ sops -d mynewtestfile.yaml - -``sops`` encrypted files contain the necessary information to decrypt their content. -All a user of ``sops`` needs is valid AWS credentials and the necessary -permissions on KMS keys. - -Given that, the only command a ``sops`` user needs is: - -.. 
code:: bash - - $ sops - -`` will be opened, decrypted, passed to a text editor (vim by default), -encrypted if modified, and saved back to its original location. All of these -steps, apart from the actual editing, are transparent to the user. - -Test with the dev PGP key -~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you want to test **sops** without having to do a bunch of setup, you can use -the example files and pgp key provided with the repository:: - - $ git clone https://github.com/mozilla/sops.git - $ cd sops - $ gpg --import pgp/sops_functional_tests_key.asc - $ sops example.yaml - -This last step will decrypt ``example.yaml`` using the test private key. - - -Encrypting using GCP KMS -~~~~~~~~~~~~~~~~~~~~~~~~ -GCP KMS uses `Application Default Credentials -`_. -If you already logged in using - -.. code:: bash - - $ gcloud auth login - -you can enable application default credentials using the sdk:: - - $ gcloud auth application-default login - -Encrypting/decrypting with GCP KMS requires a KMS ResourceID. You can use the -cloud console the get the ResourceID or you can create one using the gcloud -sdk: - -.. code:: bash - - $ gcloud kms keyrings create sops --location global - $ gcloud kms keys create sops-key --location global --keyring sops --purpose encryption - $ gcloud kms keys list --location global --keyring sops - - # you should see - NAME PURPOSE PRIMARY_STATE - projects/my-project/locations/global/keyRings/sops/cryptoKeys/sops-key ENCRYPT_DECRYPT ENABLED - -Now you can encrypt a file using:: - - $ sops --encrypt --gcp-kms projects/my-project/locations/global/keyRings/sops/cryptoKeys/sops-key test.yaml > test.enc.yaml - -And decrypt it using:: - - $ sops --decrypt test.enc.yaml - -Encrypting using Azure Key Vault -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Azure Key Vault integration tries several authentication methods, in -this order: - - 1. Client credentials - 2. Client Certificate - 3. Username Password - 4. MSI - 5. Azure CLI auth - -You can force a specific authentication method through the AZURE_AUTH_METHOD -environment variable, which may be one of: clientcredentials, clientcertificate, -usernamepassword, msi, or cli (default). - -For example, you can use service principals with the following environment variables: - -.. code:: bash - - AZURE_TENANT_ID - AZURE_CLIENT_ID - AZURE_CLIENT_SECRET - -You can create a service principal using the cli like this: - -.. code:: bash - - $ az ad sp create-for-rbac -n my-keyvault-sp - - { - "appId": "", - "displayName": "my-keyvault-sp", - "name": "http://my-keyvault-sp", - "password": "", - "tenant": "" - } - -The appId is the client id, and the password is the client secret. - -Encrypting/decrypting with Azure Key Vault requires the resource identifier for -a key. This has the following form:: - - https://${VAULT_URL}/keys/${KEY_NAME}/${KEY_VERSION} - -To create a Key Vault and assign your service principal permissions on it -from the commandline: - -.. 
code:: bash - - # Create a resource group if you do not have one: - $ az group create --name sops-rg --location westeurope - # Key Vault names are globally unique, so generate one: - $ keyvault_name=sops-$(uuidgen | tr -d - | head -c 16) - # Create a Vault, a key, and give the service principal access: - $ az keyvault create --name $keyvault_name --resource-group sops-rg --location westeurope - $ az keyvault key create --name sops-key --vault-name $keyvault_name --protection software --ops encrypt decrypt - $ az keyvault set-policy --name $keyvault_name --resource-group sops-rg --spn $AZURE_CLIENT_ID \ - --key-permissions encrypt decrypt - # Read the key id: - $ az keyvault key show --name sops-key --vault-name $keyvault_name --query key.kid - - https://sops.vault.azure.net/keys/sops-key/some-string - -Now you can encrypt a file using:: - - $ sops --encrypt --azure-kv https://sops.vault.azure.net/keys/sops-key/some-string test.yaml > test.enc.yaml - -And decrypt it using:: - - $ sops --decrypt test.enc.yaml - - -Adding and removing keys -~~~~~~~~~~~~~~~~~~~~~~~~ - -When creating new files, ``sops`` uses the PGP, KMS and GCP KMS defined in the -command line arguments ``--kms``, ``--pgp``, ``--gcp-kms`` or ``--azure-kv``, or from -the environment variables ``SOPS_KMS_ARN``, ``SOPS_PGP_FP``, ``SOPS_GCP_KMS_IDS``, -``SOPS_AZURE_KEYVAULT_URL``. That information is stored in the file under the -``sops`` section, such that decrypting files does not require providing those -parameters again. - -Master PGP and KMS keys can be added and removed from a ``sops`` file in one of -two ways: by using command line flag, or by editing the file directly. - -Command line flag ``--add-kms``, ``--add-pgp``, ``--add-gcp-kms``, ``--add-azure-kv``, -``--rm-kms``, ``--rm-pgp``, ``--rm-gcp-kms`` and ``--rm-azure-kv`` can be used to add -and remove keys from a file. -These flags use the comma separated syntax as the ``--kms``, ``--pgp``, ``--gcp-kms`` -and ``--azure-kv`` arguments when creating new files. - -.. code:: bash - - # add a new pgp key to the file and rotate the data key - $ sops -r -i --add-pgp 85D77543B3D624B63CEA9E6DBC17301B491B3F21 example.yaml - - # remove a pgp key from the file and rotate the data key - $ sops -r -i --rm-pgp 85D77543B3D624B63CEA9E6DBC17301B491B3F21 example.yaml - -Alternatively, invoking ``sops`` with the flag **-s** will display the master keys -while editing. This method can be used to add or remove kms or pgp keys under the -sops section. Invoking ``sops`` with the **-i** flag will perform an in-place edit -instead of redirecting output to ``stdout``. - -For example, to add a KMS master key to a file, add the following entry while -editing: - -.. code:: yaml - - sops: - kms: - - arn: arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e - -And, similarly, to add a PGP master key, we add its fingerprint: - -.. code:: yaml - - sops: - pgp: - - fp: 85D77543B3D624B63CEA9E6DBC17301B491B3F21 - -When the file is saved, ``sops`` will update its metadata and encrypt the data key -with the freshly added master keys. The removed entries are simply deleted from -the file. - -When removing keys, it is recommended to rotate the data key using ``-r``, -otherwise owners of the removed key may have add access to the data key in the -past. - -KMS AWS Profiles -~~~~~~~~~~~~~~~~ - -If you want to use a specific profile, you can do so with `aws_profile`: - -.. 
code:: yaml - - sops: - kms: - - arn: arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e - aws_profile: foo - -If no AWS profile is set, default credentials will be used. - -Similarly the `--aws-profile` flag can be set with the command line with any of the KMS commands. - - -Assuming roles and using KMS in various AWS accounts -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -SOPS has the ability to use KMS in multiple AWS accounts by assuming roles in -each account. Being able to assume roles is a nice feature of AWS that allows -administrators to establish trust relationships between accounts, typically from -the most secure account to the least secure one. In our use-case, we use roles -to indicate that a user of the Master AWS account is allowed to make use of KMS -master keys in development and staging AWS accounts. Using roles, a single file -can be encrypted with KMS keys in multiple accounts, thus increasing reliability -and ease of use. - -You can use keys in various accounts by tying each KMS master key to a role that -the user is allowed to assume in each account. The `IAM roles -`_ -documentation has full details on how this needs to be configured on AWS's side. - -From the point of view of ``sops``, you only need to specify the role a KMS key -must assume alongside its ARN, as follows: - -.. code:: yaml - - sops: - kms: - - arn: arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e - role: arn:aws:iam::927034868273:role/sops-dev-xyz - -The role must have permission to call Encrypt and Decrypt using KMS. An example -policy is shown below. - -.. code:: json - - { - "Sid": "Allow use of the key", - "Effect": "Allow", - "Action": [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ], - "Resource": "*", - "Principal": { - "AWS": [ - "arn:aws:iam::927034868273:role/sops-dev-xyz" - ] - } - } - -You can specify a role in the ``--kms`` flag and ``SOPS_KMS_ARN`` variable by -appending it to the ARN of the master key, separated by a **+** sign:: - - + - arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500+arn:aws:iam::927034868273:role/sops-dev-xyz - -AWS KMS Encryption Context -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -SOPS has the ability to use `AWS KMS key policy and encryption context -`_ -to refine the access control of a given KMS master key. - -When creating a new file, you can specify encryption context in the -``--encryption-context`` flag by comma separated list of key-value pairs: - -.. code:: bash - - $ sops --encryption-context Environment:production,Role:web-server test.dev.yaml - -The format of the Encrypt Context string is ``:,:,...`` - -The encryption context will be stored in the file metadata and does -not need to be provided at decryption. - -Encryption contexts can be used in conjunction with KMS Key Policies to define -roles that can only access a given context. An example policy is shown below: - -.. code:: json - - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::111122223333:role/RoleForExampleApp" - }, - "Action": "kms:Decrypt", - "Resource": "*", - "Condition": { - "StringEquals": { - "kms:EncryptionContext:AppName": "ExampleApp", - "kms:EncryptionContext:FilePath": "/var/opt/secrets/" - } - } - } - -Key Rotation -~~~~~~~~~~~~ - -It is recommended to renew the data key on a regular basis. ``sops`` supports key -rotation via the ``-r`` flag. 
Invoking it on an existing file causes sops to -reencrypt the file with a new data key, which is then encrypted with the various -KMS and PGP master keys defined in the file. - -.. code:: bash - - sops -r example.yaml - -Using .sops.yaml conf to select KMS/PGP for new files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is often tedious to specify the ``--kms`` ``--gcp-kms`` and ``--pgp`` parameters for creation -of all new files. If your secrets are stored under a specific directory, like a -``git`` repository, you can create a ``.sops.yaml`` configuration file at the root -directory to define which keys are used for which filename. - -Let's take an example: - -* file named **something.dev.yaml** should use one set of KMS A -* file named **something.prod.yaml** should use another set of KMS B -* other files use a third set of KMS C -* all live under **mysecretrepo/something.{dev,prod,gcp}.yaml** - -Under those circumstances, a file placed at **mysecretrepo/.sops.yaml** -can manage the three sets of configurations for the three types of files: - -.. code:: yaml - - # creation rules are evaluated sequentially, the first match wins - creation_rules: - # upon creation of a file that matches the pattern *.dev.yaml, - # KMS set A is used - - path_regex: \.dev\.yaml$ - kms: 'arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500,arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e+arn:aws:iam::361527076523:role/hiera-sops-prod' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - - # prod files use KMS set B in the PROD IAM - - path_regex: \.prod\.yaml$ - kms: 'arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e+arn:aws:iam::361527076523:role/hiera-sops-prod,arn:aws:kms:eu-central-1:361527076523:key/cb1fab90-8d17-42a1-a9d8-334968904f94+arn:aws:iam::361527076523:role/hiera-sops-prod' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - - # gcp files using GCP KMS - - path_regex: \.gcp\.yaml$ - gcp_kms: projects/mygcproject/locations/global/keyRings/mykeyring/cryptoKeys/thekey - - # Finally, if the rules above have not matched, this one is a - # catchall that will encrypt the file using KMS set C - # The absence of a path_regex means it will match everything - - kms: 'arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500,arn:aws:kms:us-west-2:142069644989:key/846cfb17-373d-49b9-8baf-f36b04512e47,arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - -When creating any file under **mysecretrepo**, whether at the root or under -a subdirectory, sops will recursively look for a ``.sops.yaml`` file. If one is -found, the filename of the file being created is compared with the filename -regexes of the configuration file. The first regex that matches is selected, -and its KMS and PGP keys are used to encrypt the file. It should be noted that -the looking up of ``.sops.yaml`` is from the working directory (CWD) instead of -the directory of the encrypting file (see `Issue 242 `_). - -The path_regex checks the full path of the encrypting file. Here is another example: - -* files located under directory **development** should use one set of KMS A -* files located under directory **production** should use another set of KMS B -* other files use a third set of KMS C - -.. 
code:: yaml - - creation_rules: - # upon creation of a file under development, - # KMS set A is used - - path_regex: .*/development/.* - kms: 'arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500,arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e+arn:aws:iam::361527076523:role/hiera-sops-prod' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - - # prod files use KMS set B in the PROD IAM - - path_regex: .*/production/.* - kms: 'arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e+arn:aws:iam::361527076523:role/hiera-sops-prod,arn:aws:kms:eu-central-1:361527076523:key/cb1fab90-8d17-42a1-a9d8-334968904f94+arn:aws:iam::361527076523:role/hiera-sops-prod' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - - # other files use KMS set C - - kms: 'arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500,arn:aws:kms:us-west-2:142069644989:key/846cfb17-373d-49b9-8baf-f36b04512e47,arn:aws:kms:us-west-2:361527076523:key/5052f06a-5d3f-489e-b86c-57201e06f31e' - pgp: 'FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' - -Creating a new file with the right keys is now as simple as - -.. code:: bash - - $ sops .prod.yaml - -Note that the configuration file is ignored when KMS or PGP parameters are -passed on the sops command line or in environment variables. - -Specify a different GPG executable -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sops`` checks for the ``SOPS_GPG_EXEC`` environment variable. If specified, -it will attempt to use the executable set there instead of the default -of ``gpg``. - -Example: place the following in your ``~/.bashrc`` - -.. code:: bash - - SOPS_GPG_EXEC = 'your_gpg_client_wrapper' - - -Specify a different GPG key server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default, ``sops`` uses the key server ``gpg.mozilla.org`` to retrieve the GPG -keys that are not present in the local keyring. -To use a different GPG key server, set the ``SOPS_GPG_KEYSERVER`` environment -variable. - -Example: place the following in your ``~/.bashrc`` - -.. code:: bash - - SOPS_GPG_KEYSERVER = 'gpg.example.com' - - -Key groups -~~~~~~~~~~ - -By default, ``sops`` encrypts the data key for a file with each of the master keys, -such that if any of the master keys is available, the file can be decrypted. -However, it is sometimes desirable to require access to multiple master keys -in order to decrypt files. This can be achieved with key groups. - -When using key groups in sops, data keys are split into parts such that keys from -multiple groups are required to decrypt a file. ``sops`` uses Shamir's Secret Sharing -to split the data key such that each key group has a fragment, each key in the -key group can decrypt that fragment, and a configurable number of fragments (threshold) -are needed to decrypt and piece together the complete data key. When decrypting a -file using multiple key groups, ``sops`` goes through key groups in order, and in -each group, tries to recover the fragment of the data key using a master key from -that group. Once the fragment is recovered, ``sops`` moves on to the next group, -until enough fragments have been recovered to obtain the complete data key. - -By default, the threshold is set to the number of key groups. For example, if -you have three key groups configured in your SOPS file and you don't override -the default threshold, then one master key from each of the three groups will -be required to decrypt the file. - -Management of key groups is done with the ``sops groups`` command. 
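
To make the threshold arithmetic concrete, here is a rough sketch of the
splitting step in Go, assuming the ``shamir`` package vendored by sops (a copy
of HashiCorp's implementation; the import path and exact signatures may differ
between versions). It is not the code path sops runs internally, only an
illustration of how a 256 bit data key can be split into three fragments of
which any two are enough to rebuild it:

.. code:: go

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"
        "log"

        "go.mozilla.org/sops/shamir" // assumed import path
    )

    func main() {
        // Stand-in for the 256 bit data key sops generates per file.
        dataKey := make([]byte, 32)
        if _, err := rand.Read(dataKey); err != nil {
            log.Fatal(err)
        }

        // Three key groups, threshold of two.
        fragments, err := shamir.Split(dataKey, 3, 2)
        if err != nil {
            log.Fatal(err)
        }

        // Any two fragments suffice to recover the complete data key.
        recovered, err := shamir.Combine(fragments[:2])
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(recovered, dataKey)) // true
    }

In a real file, each fragment is additionally encrypted by every master key in
its group, which is why a single usable key per group is enough.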
- -For example, you can add a new key group with 3 PGP keys and 3 KMS keys to the -file ``my_file.yaml``: - -.. code:: bash - - $ sops groups add --file my_file.yaml --pgp fingerprint1 --pgp fingerprint2 --pgp fingerprint3 --kms arn1 --kms arn2 --kms arn3 - -Or you can delete the 1st group (group number 0, as groups are zero-indexed) -from ``my_file.yaml``: - -.. code:: bash - - $ sops groups delete --file my_file.yaml 0 - -Key groups can also be specified in the ``.sops.yaml`` config file, -like so: - -.. code:: yaml - - creation_rules: - - path_regex: .*keygroups.* - key_groups: - # First key group - - pgp: - - fingerprint1 - - fingerprint2 - kms: - - arn: arn1 - role: role1 - context: - foo: bar - - arn: arn2 - # Second key group - - pgp: - - fingerprint3 - - fingerprint4 - kms: - - arn: arn3 - - arn: arn4 - # Third key group - - pgp: - - fingerprint5 - -Given this configuration, we can create a new encrypted file like we normally -would, and optionally provide the ``--shamir-secret-sharing-threshold`` command line -flag if we want to override the default threshold. ``sops`` will then split the data -key into three parts (from the number of key groups) and encrypt each fragment with -the master keys found in each group. - -For example: - -.. code:: bash - - $ sops --shamir-secret-sharing-threshold 2 example.json - -Alternatively, you can configure the Shamir threshold for each creation rule in the ``.sops.yaml`` config -with ``shamir_threshold``: - -.. code:: yaml - - creation_rules: - - path_regex: .*keygroups.* - shamir_threshold: 2 - key_groups: - # First key group - - pgp: - - fingerprint1 - - fingerprint2 - kms: - - arn: arn1 - role: role1 - context: - foo: bar - - arn: arn2 - # Second key group - - pgp: - - fingerprint3 - - fingerprint4 - kms: - - arn: arn3 - - arn: arn4 - # Third key group - - pgp: - - fingerprint5 - -And then run ``sops example.json``. - -The threshold (``shamir_threshold``) is set to 2, so this configuration will require -master keys from two of the three different key groups in order to decrypt the file. -You can then decrypt the file the same way as with any other SOPS file: - -.. code:: bash - - $ sops -d example.json - -Key service -~~~~~~~~~~~ - -There are situations where you might want to run ``sops`` on a machine that -doesn't have direct access to encryption keys such as PGP keys. The ``sops`` key -service allows you to forward a socket so that ``sops`` can access encryption -keys stored on a remote machine. This is similar to GPG Agent, but more -portable. - -SOPS uses a client-server approach to encrypting and decrypting the data -key. By default, SOPS runs a local key service in-process. SOPS uses a key -service client to send an encrypt or decrypt request to a key service, which -then performs the operation. The requests are sent using gRPC and Protocol -Buffers. The requests contain an identifier for the key they should perform -the operation with, and the plaintext or encrypted data key. The requests do -not contain any cryptographic keys, public or private. - -**WARNING: the key service connection currently does not use any sort of -authentication or encryption. Therefore, it is recommended that you make sure -the connection is authenticated and encrypted in some other way, for example -through an SSH tunnel.** - -Whenever we try to encrypt or decrypt a data key, SOPS will try to do so first -with the local key service (unless it's disabled), and if that fails, it will -try all other remote key services until one succeeds. 
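
Because the protocol is plain gRPC, a client other than sops itself can also
talk to a key service. The sketch below assumes the generated ``keyservice``
Go client and a server already listening on ``/tmp/sops.sock``; the message
and field names are taken from the keyservice protobuf definitions and may
differ slightly between versions:

.. code:: go

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"

        "go.mozilla.org/sops/keyservice"
    )

    func main() {
        // Connect to a remote key service on a unix socket. As noted in the
        // warning above, this connection is neither authenticated nor
        // encrypted by itself.
        conn, err := grpc.Dial("unix:///tmp/sops.sock", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := keyservice.NewKeyServiceClient(conn)

        // The encrypted data key, e.g. the PGP "enc" blob from the file's
        // sops metadata (placeholder value here).
        encryptedDataKey := []byte("-----BEGIN PGP MESSAGE-----\n...\n-----END PGP MESSAGE-----")

        // Ask the key service to decrypt the data key with a PGP master key
        // it holds. Only the key identifier and the encrypted data key
        // travel over the wire, never the master key itself.
        resp, err := client.Decrypt(context.Background(), &keyservice.DecryptRequest{
            Key: &keyservice.Key{
                KeyType: &keyservice.Key_PgpKey{
                    PgpKey: &keyservice.PgpKey{
                        Fingerprint: "85D77543B3D624B63CEA9E6DBC17301B491B3F21",
                    },
                },
            },
            Ciphertext: encryptedDataKey,
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("recovered a %d byte data key", len(resp.Plaintext))
    }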
- -You can start a key service server by running ``sops keyservice``. - -You can specify the key services the ``sops`` binary uses with ``--keyservice``. -This flag can be specified more than once, so you can use multiple key -services. The local key service can be disabled with -``enable-local-keyservice=false``. - -For example, to decrypt a file using both the local key service and the key -service exposed on the unix socket located in ``/tmp/sops.sock``, you can run: - -.. code:: bash - - $ sops --keyservice unix:///tmp/sops.sock -d file.yaml` - -And if you only want to use the key service exposed on the unix socket located -in ``/tmp/sops.sock`` and not the local key service, you can run: - -.. code:: bash - - $ sops --enable-local-keyservice=false --keyservice unix:///tmp/sops.sock -d file.yaml - -Auditing -~~~~~~~~ - -Sometimes, users want to be able to tell what files were accessed by whom in an -environment they control. For this reason, SOPS can generate audit logs to -record activity on encrypted files. When enabled, SOPS will write a log entry -into a pre-configured PostgreSQL database when a file is decrypted. The log -includes a timestamp, the username SOPS is running as, and the file that was -decrypted. - -In order to enable auditing, you must first create the database and credentials -using the schema found in ``audit/schema.sql``. This schema defines the -tables that store the audit events and a role named ``sops`` that only has -permission to add entries to the audit event tables. The default password for -the role ``sops`` is ``sops``. You should change this password. - -Once you have created the database, you have to tell SOPS how to connect to it. -Because we don't want users of SOPS to be able to control auditing, the audit -configuration file location is not configurable, and must be at -``/etc/sops/audit.yaml``. This file should have strict permissions such -that only the root user can modify it. - -For example, to enable auditing to a PostgreSQL database named ``sops`` running -on localhost, using the user ``sops`` and the password ``sops``, -``/etc/sops/audit.yaml`` should have the following contents: - -.. code:: yaml - - backends: - postgres: - - connection_string: "postgres://sops:sops@localhost/sops?sslmode=verify-full" - - -You can find more information on the ``connection_string`` format in the -`PostgreSQL docs `_. - -Under the ``postgres`` map entry in the above YAML is a list, so one can -provide more than one backend, and SOPS will log to all of them: - -.. code:: yaml - - backends: - postgres: - - connection_string: "postgres://sops:sops@localhost/sops?sslmode=verify-full" - - connection_string: "postgres://sops:sops@remotehost/sops?sslmode=verify-full" - -Saving Output to a File -~~~~~~~~~~~~~~~~~~~~~~~ -By default ``sops`` just dumps all the output to the standard output. We can use the -``--output`` flag followed by a filename to save the output to the file specified. -Beware using both ``--in-place`` and ``--output`` flags will result in an error. - - -Using the publish command -~~~~~~~~~~~~~~~~~~~~~~~~~ -``sops publish $file`` publishes a file to a pre-configured destination (this lives in the sops -config file). Additionally, support re-encryption rules that work just like the creation rules. - -This command requires a ``.sops.yaml`` configuration file. Below is an example: - -.. 
code:: yaml - - destination_rules: - - s3_bucket: "sops-secrets" - path_regex: s3/* - recreation_rule: - pgp: F69E4901EDBAD2D1753F8C67A64535C4163FB307 - - gcs_bucket: "sops-secrets" - path_regex: gcs/* - recreation_rule: - pgp: F69E4901EDBAD2D1753F8C67A64535C4163FB307 - - vault_path: "sops/" - vault_kv_mount_name: "secret/" # default - vault_kv_version: 2 # default - path_regex: vault/* - -The above configuration will place all files under ``s3/*`` into the S3 bucket ``sops-secrets``, -all files under ``gcs/*`` into the GCS bucket ``sops-secrets``, and the contents of all files under -``vault/*`` into Vault's KV store under the path ``secrets/sops/``. For the files that will be -published to S3 and GCS, it will decrypt them and re-encrypt them using the -``F69E4901EDBAD2D1753F8C67A64535C4163FB307`` pgp key. - -You would deploy a file to S3 with a command like: ``sops publish s3/app.yaml`` - -Publishing to Vault -******************* - -There are a few settings for Vault that you can place in your destination rules. The first -is ``vault_path``, which is required. The others are optional, and they are -``vault_address``, ``vault_kv_mount_name``, ``vault_kv_version``. - -``sops`` uses the official Vault API provided by Hashicorp, which makes use of `environment -variables `_ for -configuring the client. - -``vault_kv_mount_name`` is used if your Vault KV is mounted somewhere other than ``secret/``. -``vault_kv_version`` supports ``1`` and ``2``, with ``2`` being the default. - -Below is an example of publishing to Vault (using token auth with a local dev instance of Vault). - -.. code:: bash - - $ export VAULT_TOKEN=... - $ export VAULT_ADDR='http://127.0.0.1:8200' - $ sops -d vault/test.yaml - example_string: bar - example_number: 42 - example_map: - key: value - $ sops publish vault/test.yaml - uploading /home/user/sops_directory/vault/test.yaml to http://127.0.0.1:8200/v1/secret/data/sops/test.yaml ? (y/n): y - $ vault kv get secret/sops/test.yaml - ====== Metadata ====== - Key Value - --- ----- - created_time 2019-07-11T03:32:17.074792017Z - deletion_time n/a - destroyed false - version 3 - - ========= Data ========= - Key Value - --- ----- - example_map map[key:value] - example_number 42 - example_string bar - - -Important information on types ------------------------------- - -YAML and JSON type extensions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sops`` uses the file extension to decide which encryption method to use on the file -content. ``YAML``, ``JSON``, ``ENV``, and ``INI`` files are treated as trees of data, and key/values are -extracted from the files to only encrypt the leaf values. The tree structure is also -used to check the integrity of the file. - -Therefore, if a file is encrypted using a specific format, it need to be decrypted -in the same format. The easiest way to achieve this is to conserve the original file -extension after encrypting a file. For example: - -.. code:: bash - - $ sops -e -i myfile.json - $ sops -d myfile.json - -If you want to change the extension of the file once encrypted, you need to provide -sops with the ``--input-type`` flag upon decryption. For example: - -.. code:: bash - - $ sops -e myfile.json > myfile.json.enc - - $ sops -d --input-type json myfile.json.enc - -YAML anchors -~~~~~~~~~~~~ -``sops`` only supports a subset of ``YAML``'s many types. Encrypting YAML files that -contain strings, numbers and booleans will work fine, but files that contain anchors -will not work, because the anchors redefine the structure of the file at load time. 
- -This file will not work in ``sops``: - -.. code:: yaml - - bill-to: &id001 - street: | - 123 Tornado Alley - Suite 16 - city: East Centerville - state: KS - - ship-to: *id001 - -``sops`` uses the path to a value as additional data in the AEAD encryption, and thus -dynamic paths generated by anchors break the authentication step. - -JSON and TEXT file types do not support anchors and thus have no such limitation. - -YAML Streams -~~~~~~~~~~~~ - -``YAML`` supports having more than one "document" in a single file, while -formats like ``JSON`` do not. ``sops`` is able to handle both. This means the -following multi-document will be encrypted as expected: - -.. code:: yaml - - --- - data: foo - --- - data: bar - -Note that the ``sops`` metadata, i.e. the hash, etc, is computed for the physical -file rather than each internal "document". - -Top-level arrays -~~~~~~~~~~~~~~~~ -``YAML`` and ``JSON`` top-level arrays are not supported, because ``sops`` -needs a top-level ``sops`` key to store its metadata. - -This file will not work in sops: - -.. code:: yaml - - --- - - some - - array - - elements - -But this one will because because the ``sops`` key can be added at the same level as the -``data`` key. - -.. code:: yaml - - data: - - some - - array - - elements - -Similarly, with ``JSON`` arrays, this document will not work: - -.. code:: json - - [ - "some", - "array", - "elements" - ] - - -But this one will work just fine: - -.. code:: json - - { - "data": [ - "some", - "array", - "elements" - ] - } - - -Examples --------- - -Take a look into the `examples `_ folder for detailed use cases of sops in a CI environment. The section below describes specific tips for common use cases. - -Creating a new file -~~~~~~~~~~~~~~~~~~~ - -The command below creates a new file with a data key encrypted by KMS and PGP. - -.. code:: bash - - $ sops --kms "arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500" --pgp C9CAB0AF1165060DB58D6D6B2653B624D620786D /path/to/new/file.yaml - -Encrypting an existing file -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Similar to the previous command, we tell sops to use one KMS and one PGP key. -The path points to an existing cleartext file, so we give sops flag ``-e`` to -encrypt the file, and redirect the output to a destination file. - -.. code:: bash - - $ export SOPS_KMS_ARN="arn:aws:kms:us-west-2:927034868273:key/fe86dd69-4132-404c-ab86-4269956b4500" - $ export SOPS_PGP_FP="C9CAB0AF1165060DB58D6D6B2653B624D620786D" - $ sops -e /path/to/existing/file.yaml > /path/to/new/encrypted/file.yaml - -Decrypt the file with ``-d``. - -.. code:: bash - - $ sops -d /path/to/new/encrypted/file.yaml - -Encrypt or decrypt a file in place -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Rather than redirecting the output of ``-e`` or ``-d``, sops can replace the -original file after encrypting or decrypting it. - -.. code:: bash - - # file.yaml is in cleartext - $ sops -e -i /path/to/existing/file.yaml - # file.yaml is now encrypted - $ sops -d -i /path/to/existing/file.yaml - # file.yaml is back in cleartext - -Encrypting binary files -~~~~~~~~~~~~~~~~~~~~~~~ - -``sops`` primary use case is encrypting YAML and JSON configuration files, but it -also has the ability to manage binary files. When encrypting a binary, sops will -read the data as bytes, encrypt it, store the encrypted base64 under -``tree['data']`` and write the result as JSON. - -Note that the base64 encoding of encrypted data can actually make the encrypted -file larger than the cleartext one. 
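
The same binary handling is available to Go programs through the ``decrypt``
package mentioned earlier. A minimal sketch, assuming a hypothetical encrypted
file ``mysecret.bin`` and the package's ``File`` helper with the ``binary``
format selector (the import path changes in later sops releases):

.. code:: go

    package main

    import (
        "log"
        "os"

        "go.mozilla.org/sops/decrypt"
    )

    func main() {
        // Decrypt a binary-mode sops file; the cleartext bytes are returned
        // exactly as they were before encryption.
        cleartext, err := decrypt.File("mysecret.bin", "binary")
        if err != nil {
            log.Fatalf("failed to decrypt: %v", err)
        }
        if err := os.WriteFile("mysecret.clear.bin", cleartext, 0o600); err != nil {
            log.Fatal(err)
        }
    }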
- -In-place encryption/decryption also works on binary files. - -.. code:: - - $ dd if=/dev/urandom of=/tmp/somerandom bs=1024 - count=512 - 512+0 records in - 512+0 records out - 524288 bytes (524 kB) copied, 0.0466158 s, 11.2 MB/s - - $ sha512sum /tmp/somerandom - 9589bb20280e9d381f7a192000498c994e921b3cdb11d2ef5a986578dc2239a340b25ef30691bac72bdb14028270828dad7e8bd31e274af9828c40d216e60cbe /tmp/somerandom - - $ sops -e -i /tmp/somerandom - please wait while a data encryption key is being generated and stored securely - - $ sops -d -i /tmp/somerandom - - $ sha512sum /tmp/somerandom - 9589bb20280e9d381f7a192000498c994e921b3cdb11d2ef5a986578dc2239a340b25ef30691bac72bdb14028270828dad7e8bd31e274af9828c40d216e60cbe /tmp/somerandom - -Extract a sub-part of a document tree -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sops`` can extract a specific part of a YAML or JSON document, by provided the -path in the ``--extract`` command line flag. This is useful to extract specific -values, like keys, without needing an extra parser. - -.. code:: bash - - $ sops -d --extract '["app2"]["key"]' ~/git/svc/sops/example.yaml - -----BEGIN RSA PRIVATE KEY----- - MIIBPAIBAAJBAPTMNIyHuZtpLYc7VsHQtwOkWYobkUblmHWRmbXzlAX6K8tMf3Wf - ImcbNkqAKnELzFAPSBeEMhrBN0PyOC9lYlMCAwEAAQJBALXD4sjuBn1E7Y9aGiMz - bJEBuZJ4wbhYxomVoQKfaCu+kH80uLFZKoSz85/ySauWE8LgZcMLIBoiXNhDKfQL - vHECIQD6tCG9NMFWor69kgbX8vK5Y+QL+kRq+9HK6yZ9a+hsLQIhAPn4Ie6HGTjw - fHSTXWZpGSan7NwTkIu4U5q2SlLjcZh/AiEA78NYRRBwGwAYNUqzutGBqyXKUl4u - Erb0xAEyVV7e8J0CIQC8VBY8f8yg+Y7Kxbw4zDYGyb3KkXL10YorpeuZR4LuQQIg - bKGPkMM4w5blyE1tqGN0T7sJwEx+EUOgacRNqM2ljVA= - -----END RSA PRIVATE KEY----- - -The tree path syntax uses regular python dictionary syntax, without the -variable name. Extract keys by naming them, and array elements by numbering -them. - -.. code:: bash - - $ sops -d --extract '["an_array"][1]' ~/git/svc/sops/example.yaml - secretuser2 - -Set a sub-part in a document tree -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sops`` can set a specific part of a YAML or JSON document, by providing -the path and value in the ``--set`` command line flag. This is useful to -set specific values, like keys, without needing an editor. - -.. code:: bash - - $ sops --set '["app2"]["key"] "app2keystringvalue"' ~/git/svc/sops/example.yaml - -The tree path syntax uses regular python dictionary syntax, without the -variable name. Set to keys by naming them, and array elements by -numbering them. - -.. code:: bash - - $ sops --set '["an_array"][1] "secretuser2"' ~/git/svc/sops/example.yaml - -The value must be formatted as json. - -.. code:: bash - - $ sops --set '["an_array"][1] {"uid1":null,"uid2":1000,"uid3":["bob"]}' ~/git/svc/sops/example.yaml - -Using sops as a library in a python script -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can import sops as a module and use it in your python program. - -.. code:: python - - import sops - - pathtype = sops.detect_filetype(path) - tree = sops.load_file_into_tree(path, pathtype) - sops_key, tree = sops.get_key(tree) - tree = sops.walk_and_decrypt(tree, sops_key) - sops.write_file(tree, path=path, filetype=pathtype) - -Note: this uses the previous implemenation of `sops` written in python, -and so doesn't support newer features such as GCP-KMS. -To use the current version, call out to `sops` using `subprocess.check_output` - -Showing diffs in cleartext in git -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You most likely want to store encrypted files in a version controlled repository. 
-Sops can be used with git to decrypt files when showing diffs between versions. -This is very handy for reviewing changes or visualizing history. - -To configure sops to decrypt files during diff, create a ``.gitattributes`` file -at the root of your repository that contains a filter and a command. - -.. code:: - - *.yaml diff=sopsdiffer - -Here we only care about YAML files. ``sopsdiffer`` is an arbitrary name that we map -to a sops command in the git configuration file of the repository. - -.. code:: bash - - $ git config diff.sopsdiffer.textconv "sops -d" - - $ grep -A 1 sopsdiffer .git/config - [diff "sopsdiffer"] - textconv = "sops -d" - -With this in place, calls to ``git diff`` will decrypt both previous and current -versions of the target file prior to displaying the diff. And it even works with -git client interfaces, because they call git diff under the hood! - -Encrypting only parts of a file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Note: this only works on YAML and JSON files, not on BINARY files. - -By default, ``sops`` encrypts all the values of a YAML or JSON file and leaves the -keys in cleartext. In some instances, you may want to exclude some values from -being encrypted. This can be accomplished by adding the suffix **_unencrypted** -to any key of a file. When set, all values underneath the key that set the -**_unencrypted** prefix will be left in cleartext. - -Note that, while in cleartext, unencrypted content is still added to the -checksum of the file, and thus cannot be modified outside of sops without -breaking the file integrity check. - -The unencrypted suffix can be set to a different value using the -``--unencrypted-suffix`` option. - -Conversely, you can opt in to only encrypt some values in a YAML or JSON file, -by adding a chosen suffix to those keys and passing it to the ``--encrypted-suffix`` option. - -A third method is to use the ``--encrypted-regex`` which will only encrypt values under -keys that match the supplied regular expression. For example, this command: - -.. code:: bash - - $ sops --encrypt --encrypted-regex '&(data|stringData)' k8s-secrets.yaml - -will encrypt the values under the ``data`` and ``stringData`` keys in a YAML file -containing kubernetes secrets. It will not encrypt other values that help you to -navigate the file, like ``metadata`` which contains the secrets' names. - -You can also specify these options in the ``.sops.yaml`` config file. - -Note: these three options ``--unencrypted-suffix``, ``--encrypted-suffix``, and ``--encrypted-regex`` are -mutually exclusive and cannot all be used in the same file. - -Encryption Protocol -------------------- - -When sops creates a file, it generates a random 256 bit data key and asks each -KMS and PGP master key to encrypt the data key. The encrypted version of the data -key is stored in the ``sops`` metadata under ``sops.kms`` and ``sops.pgp``. - -For KMS: - -.. code:: yaml - - sops: - kms: - - enc: CiC6yCOtzsnFhkfdIslYZ0bAf//gYLYCmIu87B3sy/5yYxKnAQEBAQB4usgjrc7JxYZH3SLJWGdGwH//4GC2ApiLvOwd7Mv+cmMAAAB+MHwGCSqGSIb3DQEHBqBvMG0CAQAwaAYJKoZIhvcNAQcBMB4GCWCGSAFlAwQBLjARBAyGdRODuYMHbA8Ozj8CARCAO7opMolPJUmBXd39Zlp0L2H9fzMKidHm1vvaF6nNFq0ClRY7FlIZmTm4JfnOebPseffiXFn9tG8cq7oi - enc_ts: 1439568549.245995 - arn: arn:aws:kms:us-east-1:656532927350:key/920aff2e-c5f1-4040-943a-047fa387b27e - -For PGP: - -.. 
code:: yaml - - sops: - pgp: - - fp: 85D77543B3D624B63CEA9E6DBC17301B491B3F21 - created_at: 1441570391.930042 - enc: | - -----BEGIN PGP MESSAGE----- - Version: GnuPG v1 - - hQIMA0t4uZHfl9qgAQ//UvGAwGePyHuf2/zayWcloGaDs0MzI+zw6CmXvMRNPUsA - pAgRKczJmDu4+XzN+cxX5Iq9xEWIbny9B5rOjwTXT3qcUYZ4Gkzbq4MWkjuPp/Iv - qO4MJaYzoH5YxC4YORQ2LvzhA2YGsCzYnljmatGEUNg01yJ6r5mwFwDxl4Nc80Cn - RwnHuGExK8j1jYJZu/juK1qRbuBOAuruIPPWVdFB845PA7waacG1IdUW3ZtBkOy3 - O0BIfG2ekRg0Nik6sTOhDUA+l2bewCcECI8FYCEjwHm9Sg5cxmP2V5m1mby+uKAm - kewaoOyjbmV1Mh3iI1b/AQMr+/6ZE9MT2KnsoWosYamFyjxV5r1ZZM7cWKnOT+tu - KOvGhTV1TeOfVpajNTNwtV/Oyh3mMLQ0F0HgCTqomQVqw5+sj7OWAASuD3CU/dyo - pcmY5Qe0TNL1JsMNEH8LJDqSh+E0hsUxdY1ouVsg3ysf6mdM8ciWb3WRGxih1Vmf - unfLy8Ly3V7ZIC8EHV8aLJqh32jIZV4i2zXIoO4ZBKrudKcECY1C2+zb/TziVAL8 - qyPe47q8gi1rIyEv5uirLZjgpP+JkDUgoMnzlX334FZ9pWtQMYW4Y67urAI4xUq6 - /q1zBAeHoeeeQK+YKDB7Ak/Y22YsiqQbNp2n4CKSKAE4erZLWVtDvSp+49SWmS/S - XgGi+13MaXIp0ecPKyNTBjF+NOw/I3muyKr8EbDHrd2XgIT06QXqjYLsCb1TZ0zm - xgXsOTY3b+ONQ2zjhcovanDp7/k77B+gFitLYKg4BLZsl7gJB12T8MQnpfSmRT4= - =oJgS - -----END PGP MESSAGE----- - -``sops`` then opens a text editor on the newly created file. The user adds data to the -file and saves it when done. - -Upon save, sops browses the entire file as a key/value tree. Every time sops -encounters a leaf value (a value that does not have children), it encrypts the -value with AES256_GCM using the data key and a 256 bit random initialization -vector. - -Each file uses a single data key to encrypt all values of a document, but each -value receives a unique initialization vector and has unique authentication data. - -Additional data is used to guarantee the integrity of the encrypted data -and of the tree structure: when encrypting the tree, key names are concatenated -into a byte string that is used as AEAD additional data (aad) when encrypting -values. We expect that keys do not carry sensitive information, and -keeping them in cleartext allows for better diff and overall readability. - -Any valid KMS or PGP master key can later decrypt the data key and access the -data. - -Multiple master keys allow for sharing encrypted files without sharing master -keys, and provide a disaster recovery solution. The recommended way to use sops -is to have two KMS master keys in different regions and one PGP public key with -the private key stored offline. If, by any chance, both KMS master keys are -lost, you can always recover the encrypted data using the PGP private key. - -Message Authentication Code -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In addition to authenticating branches of the tree using keys as additional -data, sops computes a MAC on all the values to ensure that no value has been -added or removed fraudulently. The MAC is stored encrypted with AES_GCM and -the data key under tree->`sops`->`mac`. - -Motivation ----------- - -Automating the distribution of secrets and credentials to components of an -infrastructure is a hard problem. We know how to encrypt secrets and share them -between humans, but extending that trust to systems is difficult. Particularly -when these systems follow devops principles and are created and destroyed -without human intervention. The issue boils down to establishing the initial -trust of a system that just joined the infrastructure, and providing it access -to the secrets it needs to configure itself. - -The initial trust -~~~~~~~~~~~~~~~~~ - -In many infrastructures, even highly dynamic ones, the initial trust is -established by a human. 
An example is seen in Puppet by the way certificates are -issued: when a new system attempts to join a Puppetmaster, an administrator -must, by default, manually approve the issuance of the certificate the system -needs. This is cumbersome, and many puppetmasters are configured to auto-sign -new certificates to work around that issue. This is obviously not recommended -and far from ideal. - -AWS provides a more flexible approach to trusting new systems. It uses a -powerful mechanism of roles and identities. In AWS, it is possible to verify -that a new system has been granted a specific role at creation, and it is -possible to map that role to specific resources. Instead of trusting new systems -directly, the administrator trusts the AWS permission model and its automation -infrastructure. As long as AWS keys are safe, and the AWS API is secure, we can -assume that trust is maintained and systems are who they say they are. - -KMS, Trust and secrets distribution -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Using the AWS trust model, we can create fine grained access controls to -Amazon's Key Management Service (KMS). KMS is a service that encrypts and -decrypts data with AES_GCM, using keys that are never visible to users of the -service. Each KMS master key has a set of role-based access controls, and -individual roles are permitted to encrypt or decrypt using the master key. KMS -helps solve the problem of distributing keys, by shifting it into an access -control problem that can be solved using AWS's trust model. - -Operational requirements -~~~~~~~~~~~~~~~~~~~~~~~~ - -When Mozilla's Services Operations team started revisiting the issue of -distributing secrets to EC2 instances, we set a goal to store these secrets -encrypted until the very last moment, when they need to be decrypted on target -systems. Not unlike many other organizations that operate sufficiently complex -automation, we found this to be a hard problem with a number of prerequisites: - -1. Secrets must be stored in YAML files for easy integration into hiera - -2. Secrets must be stored in GIT, and when a new CloudFormation stack is - built, the current HEAD is pinned to the stack. (This allows secrets to - be changed in GIT without impacting the current stack that may - autoscale). - -3. Entries must be encrypted separately. Encrypting entire files as blobs makes - git conflict resolution almost impossible. Encrypting each entry - separately is much easier to manage. - -4. Secrets must always be encrypted on disk (admin laptop, upstream - git repo, jenkins and S3) and only be decrypted on the target - systems - -SOPS can be used to encrypt YAML, JSON and BINARY files. In BINARY mode, the -content of the file is treated as a blob, the same way PGP would encrypt an -entire file. In YAML and JSON modes, however, the content of the file is -manipulated as a tree where keys are stored in cleartext, and values are -encrypted. hiera-eyaml does something similar, and over the years we learned -to appreciate its benefits, namely: - -* diffs are meaningful. If a single value of a file is modified, only that - value will show up in the diff. The diff is still limited to only showing - encrypted data, but that information is already more granular that - indicating that an entire file has changed. - -* conflicts are easier to resolve. If multiple users are working on the - same encrypted files, as long as they don't modify the same values, - changes are easy to merge. 
This is an improvement over the PGP - encryption approach where unsolvable conflicts often happen when - multiple users work on the same file. - -OpenPGP integration -~~~~~~~~~~~~~~~~~~~ - -OpenPGP gets a lot of bad press for being an outdated crypto protocol, and while -true, what really made us look for alternatives is the difficulty of managing and -distributing keys to systems. With KMS, we manage permissions to an API, not keys, -and that's a lot easier to do. - -But PGP is not dead yet, and we still rely on it heavily as a backup solution: -all our files are encrypted with KMS and with one PGP public key, with its -private key stored securely for emergency decryption in the event that we lose -all our KMS master keys. - -SOPS can be used without KMS entirely, the same way you would use an encrypted -PGP file: by referencing the pubkeys of each individual who has access to the file. -It can easily be done by providing sops with a comma-separated list of public keys -when creating a new file: - -.. code:: bash - - $ sops --pgp "E60892BB9BD89A69F759A1A0A3D652173B763E8F,84050F1D61AF7C230A12217687DF65059EF093D3,85D77543B3D624B63CEA9E6DBC17301B491B3F21" mynewfile.yaml - -Threat Model ------------- - -The security of the data stored using sops is as strong as the weakest -cryptographic mechanism. Values are encrypted using AES256_GCM which is the -strongest symmetric encryption algorithm known today. Data keys are encrypted -in either KMS, which also uses AES256_GCM, or PGP which uses either RSA or -ECDSA keys. - -Going from the most likely to the least likely, the threats are as follows: - -Compromised AWS credentials grant access to KMS master key -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -An attacker with access to an AWS console can grant itself access to one of -the KMS master keys used to encrypt a sops data key. This threat should be -mitigated by protecting AWS accesses with strong controls, such as multi-factor -authentication, and also by performing regular audits of permissions granted -to AWS users. - -Compromised PGP key -~~~~~~~~~~~~~~~~~~~ - -PGP keys are routinely mishandled, either because owners copy them from -machine to machine, or because the key is left forgotten on an unused machine -an attacker gains access to. When using PGP encryption, sops users should take -special care of PGP private keys, and store them on smart cards or offline -as often as possible. - -Factorized RSA key -~~~~~~~~~~~~~~~~~~ - -sops doesn't apply any restriction on the size or type of PGP keys. A weak PGP -keys, for example 512 bits RSA, could be factorized by an attacker to gain -access to the private key and decrypt the data key. Users of sops should rely -on strong keys, such as 2048+ bits RSA keys, or 256+ bits ECDSA keys. - -Weak AES cryptography -~~~~~~~~~~~~~~~~~~~~~ - -A vulnerability in AES256_GCM could potentially leak the data key or the KMS -master key used by a sops encrypted file. While no such vulnerability exists -today, we recommend that users keep their encrypted files reasonably private. - -Backward compatibility ----------------------- - -``sops`` will remain backward compatible on the major version, meaning that all -improvements brought to the 1.X and 2.X branches (current) will maintain the -file format introduced in **1.0**. 
- -Security --------- - -Please report security issues to jvehent at mozilla dot com, or by using one -of the contact method available on keybase: `https://keybase.io/jvehent `_ - -License -------- -Mozilla Public License Version 2.0 - -Authors -------- - -The core team is composed of: - -* Adrian Utrilla @autrilla -* Julien Vehent @jvehent -* AJ Banhken @ajvb - -And a whole bunch of `contributors `_ - -Credits -------- - -`sops` was inspired by `hiera-eyaml `_, -`credstash `_ , -`sneaker `_, -`password store `_ and too many years managing -PGP encrypted files by hand... diff --git a/vendor/go.mozilla.org/sops/azkv/keysource.go b/vendor/go.mozilla.org/sops/azkv/keysource.go deleted file mode 100644 index 5eb1bae44..000000000 --- a/vendor/go.mozilla.org/sops/azkv/keysource.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Package azkv contains an implementation of the go.mozilla.org/sops/keys.MasterKey interface that encrypts and decrypts the -data key using Azure Key Vault with the Azure Go SDK. -*/ -package azkv //import "go.mozilla.org/sops/azkv" - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "os" - "regexp" - "strings" - "time" - - "go.mozilla.org/sops/logging" - - "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/sirupsen/logrus" -) - -var log *logrus.Logger - -func init() { - log = logging.NewLogger("AZKV") -} - -// MasterKey is a Azure Key Vault key used to encrypt and decrypt sops' data key. -type MasterKey struct { - VaultURL string - Name string - Version string - - EncryptedKey string - CreationDate time.Time -} - -func newKeyVaultClient() (keyvault.BaseClient, error) { - var err error - c := keyvault.New() - c.Authorizer, err = newAuthorizer() - if err != nil { - log.WithError(err).Error("Failed to create Azure authorizer") - return c, err - } - - return c, nil -} - -// newAuthorizer returns the correct authorizer for the given settings and/or based on the value -// of the AZURE_AUTH_METHOD environment variable, which may be one of: -// clientcredentials, clientcertificate, usernamepassword, msi, or cli (default). -func newAuthorizer() (autorest.Authorizer, error) { - settings := struct { - authMethod string - tenantID string - clientID string - clientSecret string - certificatePath string - certificatePassword string - username string - password string - envName string - resource string - environment azure.Environment - }{ - authMethod: os.Getenv("AZURE_AUTH_METHOD"), - tenantID: os.Getenv("AZURE_TENANT_ID"), - clientID: os.Getenv("AZURE_CLIENT_ID"), - clientSecret: os.Getenv("AZURE_CLIENT_SECRET"), - certificatePath: os.Getenv("AZURE_CERTIFICATE_PATH"), - certificatePassword: os.Getenv("AZURE_CERTIFICATE_PASSWORD"), - username: os.Getenv("AZURE_USERNAME"), - password: os.Getenv("AZURE_PASSWORD"), - envName: os.Getenv("AZURE_ENVIRONMENT"), - resource: os.Getenv("AZURE_AD_RESOURCE"), - } - - settings.environment = azure.PublicCloud - if settings.envName != "" { - val, err := azure.EnvironmentFromName(settings.envName) - if err != nil { - return nil, err - } - settings.environment = val - } - - if settings.resource == "" { - settings.resource = strings.TrimSuffix(settings.environment.KeyVaultEndpoint, "/") - } - - if os.Getenv("MSI_ENDPOINT") != "" { - settings.authMethod = "msi" - } - - // 1. 
Client credentials - if (settings.clientSecret != "") || settings.authMethod == "clientcredentials" { - config := auth.NewClientCredentialsConfig(settings.clientID, settings.clientSecret, settings.tenantID) - config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint - config.Resource = settings.resource - return config.Authorizer() - } - - // 2. Client Certificate - if (settings.certificatePath != "") || settings.authMethod == "clientcertificate" { - config := auth.NewClientCertificateConfig(settings.certificatePath, settings.certificatePassword, settings.clientID, settings.tenantID) - config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint - config.Resource = settings.resource - return config.Authorizer() - } - - // 3. Username Password - if (settings.username != "" && settings.password != "") || settings.authMethod == "usernamepassword" { - config := auth.NewUsernamePasswordConfig(settings.username, settings.password, settings.clientID, settings.tenantID) - config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint - config.Resource = settings.resource - return config.Authorizer() - } - - // 4. MSI - if settings.authMethod == "msi" { - config := auth.NewMSIConfig() - config.Resource = settings.resource - config.ClientID = settings.clientID - return config.Authorizer() - } - - // 5. Device Code - if settings.authMethod == "devicecode" { - // TODO: Removed until we decide how to handle prompt on stdout, etc. - //// TODO: This will be required on every execution. Consider caching. - //config := auth.NewDeviceFlowConfig(settings.clientID, settings.tenantID) - //return config.Authorizer() - return nil, errors.New("device code flow not implemented") - } - - // 6. CLI - return auth.NewAuthorizerFromCLIWithResource(settings.resource) -} - -// NewMasterKey creates a new MasterKey from an URL, key name and version, setting the creation date to the current date -func NewMasterKey(vaultURL string, keyName string, keyVersion string) *MasterKey { - return &MasterKey{ - VaultURL: vaultURL, - Name: keyName, - Version: keyVersion, - CreationDate: time.Now().UTC(), - } -} - -// MasterKeysFromURLs takes a comma separated list of Azure Key Vault URLs and returns a slice of new MasterKeys for them -func MasterKeysFromURLs(urls string) ([]*MasterKey, error) { - var keys []*MasterKey - if urls == "" { - return keys, nil - } - for _, s := range strings.Split(urls, ",") { - k, err := NewMasterKeyFromURL(s) - if err != nil { - return nil, err - } - keys = append(keys, k) - } - return keys, nil -} - -// NewMasterKeyFromURL takes an Azure Key Vault key URL and returns a new MasterKey -// URL format is {vaultUrl}/keys/{key-name}/{key-version} -func NewMasterKeyFromURL(url string) (*MasterKey, error) { - k := &MasterKey{} - re := regexp.MustCompile("^(https://[^/]+)/keys/([^/]+)/([^/]+)$") - parts := re.FindStringSubmatch(url) - if parts == nil || len(parts) < 2 { - return nil, fmt.Errorf("Could not parse valid key from %q", url) - } - - k.VaultURL = parts[1] - k.Name = parts[2] - k.Version = parts[3] - k.CreationDate = time.Now().UTC() - return k, nil -} - -// EncryptedDataKey returns the encrypted data key this master key holds -func (key *MasterKey) EncryptedDataKey() []byte { - return []byte(key.EncryptedKey) -} - -// SetEncryptedDataKey sets the encrypted data key for this master key -func (key *MasterKey) SetEncryptedDataKey(enc []byte) { - key.EncryptedKey = string(enc) -} - -// Encrypt takes a sops data key, encrypts it with Key Vault and stores the result in the EncryptedKey field 
-func (key *MasterKey) Encrypt(dataKey []byte) error { - c, err := newKeyVaultClient() - if err != nil { - return err - } - data := base64.RawURLEncoding.EncodeToString(dataKey) - p := keyvault.KeyOperationsParameters{Value: &data, Algorithm: keyvault.RSAOAEP256} - - res, err := c.Encrypt(context.Background(), key.VaultURL, key.Name, key.Version, p) - if err != nil { - log.WithError(err).WithFields(logrus.Fields{ - "key": key.Name, - "version": key.Version, - }).Error("Encryption failed") - return fmt.Errorf("Failed to encrypt data: %v", err) - } - - key.EncryptedKey = *res.Result - log.WithFields(logrus.Fields{ - "key": key.Name, - "version": key.Version, - }).Info("Encryption succeeded") - - return nil -} - -// EncryptIfNeeded encrypts the provided sops' data key and encrypts it if it hasn't been encrypted yet -func (key *MasterKey) EncryptIfNeeded(dataKey []byte) error { - if key.EncryptedKey == "" { - return key.Encrypt(dataKey) - } - return nil -} - -// Decrypt decrypts the EncryptedKey field with Azure Key Vault and returns the result. -func (key *MasterKey) Decrypt() ([]byte, error) { - c, err := newKeyVaultClient() - if err != nil { - return nil, err - } - p := keyvault.KeyOperationsParameters{Value: &key.EncryptedKey, Algorithm: keyvault.RSAOAEP256} - - res, err := c.Decrypt(context.TODO(), key.VaultURL, key.Name, key.Version, p) - if err != nil { - log.WithError(err).WithFields(logrus.Fields{ - "key": key.Name, - "version": key.Version, - }).Error("Decryption failed") - return nil, fmt.Errorf("Error decrypting key: %v", err) - } - - plaintext, err := base64.RawURLEncoding.DecodeString(*res.Result) - if err != nil { - log.WithError(err).WithFields(logrus.Fields{ - "key": key.Name, - "version": key.Version, - }).Error("Decryption failed") - return nil, err - } - - log.WithFields(logrus.Fields{ - "key": key.Name, - "version": key.Version, - }).Info("Decryption succeeded") - return plaintext, nil -} - -// NeedsRotation returns whether the data key needs to be rotated or not. 
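For context, a minimal usage sketch of the azkv key source removed here, with placeholder vault URL, key name and key version; credentials are resolved from the AZURE_* environment variables (or the Azure CLI) by the authorizer chain shown earlier, so running it needs a reachable Key Vault:

```go
package main

import (
	"fmt"
	"log"

	"go.mozilla.org/sops/azkv" // old import path removed by this patch
)

func main() {
	// URL format parsed by NewMasterKeyFromURL: {vaultUrl}/keys/{key-name}/{key-version}
	// (the vault, key name and version below are placeholders).
	key, err := azkv.NewMasterKeyFromURL(
		"https://example-vault.vault.azure.net/keys/sops-key/example-key-version")
	if err != nil {
		log.Fatal(err)
	}
	// Encrypt wraps the sops data key with Key Vault (RSA-OAEP-256) and stores
	// the armored result in EncryptedKey; Decrypt reverses the operation.
	if err := key.Encrypt([]byte("example data key")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(key.EncryptedDataKey()))
}
```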
-func (key *MasterKey) NeedsRotation() bool { - return time.Since(key.CreationDate) > (time.Hour * 24 * 30 * 6) -} - -// ToString converts the key to a string representation -func (key *MasterKey) ToString() string { - return fmt.Sprintf("%s/keys/%s/%s", key.VaultURL, key.Name, key.Version) -} - -// ToMap converts the MasterKey to a map for serialization purposes -func (key MasterKey) ToMap() map[string]interface{} { - out := make(map[string]interface{}) - out["vaultUrl"] = key.VaultURL - out["key"] = key.Name - out["version"] = key.Version - out["created_at"] = key.CreationDate.UTC().Format(time.RFC3339) - out["enc"] = key.EncryptedKey - return out -} diff --git a/vendor/go.mozilla.org/sops/example.ini b/vendor/go.mozilla.org/sops/example.ini deleted file mode 100644 index e54a166e3..000000000 --- a/vendor/go.mozilla.org/sops/example.ini +++ /dev/null @@ -1,28 +0,0 @@ -; ENC[AES256_GCM,data:40al3asZ8L1+ajzMBp/0sHai9UNP3IoqkLIPNhuuMxs8Cg==,iv:2lqF+4RJTS9wSgjadUEYOyMVbe6eIOOFNIb0jgjdmK8=,tag:PkrPbykEN0ngUhP+ajfLKg==,type:comment] -[name] -firstName = ENC[AES256_GCM,data:30Q/XQ==,iv:nxqUz6dmPjVdXoRrtw/CGOH1HqFYyLmocbfikqzwf9o=,tag:3sIh386PIoTJ3XBu8T2Pww==,type:str] -lastName = ENC[AES256_GCM,data:W1hTh14=,iv:5M3RqShfVjkOatrGj4Z+e6BtOfQK6hZxWdm6IfkwzcY=,tag:zJBc/5O+rELdKu7Ja0IHuQ==,type:str] -age = ENC[AES256_GCM,data:ulhPjA==,iv:0ylri5mup2gAfT3LEyMdoi2orn/hfTvFs0KOzKwN8Ds=,tag:UVuBXcN1vfwbWH9md0K32A==,type:str] - -[address] -city = ENC[AES256_GCM,data:0+RVjaLGpys=,iv:7XomjWqHdmEbUQ4H9RG0L4kpmjnL0ROEP/bj0u9t0io=,tag:Z57atgaPDw1S51H6JfvcGA==,type:str] -postalCode = ENC[AES256_GCM,data:aPMpTW00LR63Zw==,iv:RiTxsg7vYIlaN41O3ESL/IKLwPi5VtsrepN0RdJrRmQ=,tag:gF4W5LmZ9gsrcBXxwrTPrA==,type:str] -state = ENC[AES256_GCM,data:xg8=,iv:6nvhxItys13r39+ktd5wd53T6UmgojeuqRorrM8bPHg=,tag:Osc5sC4LsesS1wZwAUt5tw==,type:str] -streetAddress = ENC[AES256_GCM,data:DXQclrCghMapHiqkow==,iv:aqc0JH4vadJWp9xwF2kch7xYuZ5bYA8yR9L7An6x2q4=,tag:XFj1rCYSSnwZXCh5An0TAA==,type:str] - -[phoneNumbers] -home = ENC[AES256_GCM,data:uD2247keEsbFI0ub,iv:yVWbL4wF6h3FXWMToZJoKy4Unx/Mp7dDbrT8zxJnZtE=,tag:6zR/ufgemmNx5/ySNnD/Aw==,type:str] -office = ENC[AES256_GCM,data:knmFCdTtHhE0pt5q,iv:dYMzj5TX0G3g0ZpGkB+CvR2XEoPDAwu1SSaY1R/sCYM=,tag:52+8cwq7s4Jloit/lYorOA==,type:str] - -[not private] -notsecret_unencrypted = hi there! 
- -[sops] -lastmodified = 2019-01-11T05:24:42Z -pgp__list_0__map_created_at = 2019-01-11T05:24:42Z -pgp__list_0__map_enc = -----BEGIN PGP MESSAGE-----\n\nwYwDEEVDpnzXnMABBACb46+yRjeFaLKegY3FGypZyE2XXiVUTEEL3iOu/gjN5TOT\n+EzgNKCxT0B+4mhG6/BSkxN6D3iU9D7ABeC2a1OO2I177dH5Mu9vG0RhPmxFQoYC\nyvqW8HnYqy0SnX/BaLrUlAOutFCpnNcwCHrqjX5u2w2f/nq47ZODGp3QbHEDxdLg\nAeT+v1BMYMoCuq5GD5bBeAv04WVv4JjgweEQFuAq4tkaBRDg9eXCIZxGmpuMkYHC\nsPEuuGLX22TrU3Nhq23xMiyTamf0qOCh5FDCZ5M2BMBTtwVneMq0TO/idTFumeFk\nXwA=\n=Lrn7\n-----END PGP MESSAGE----- -unencrypted_suffix = _unencrypted -version = 3.2.0 -pgp__list_0__map_fp = 1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A -mac = ENC[AES256_GCM,data:P0QMbP0PD9zTGUbGw4gdyIXP5/RGOnWqplcXTvJyIYDlZLENNGJTSqb02OpZJ7iCCHi/LfzX0dQ0uacCHVbrgh1uWwgaPlvplJ++WVEpF7CHFplCPj4gvcKk2Jh/tNmQLov1VrfbzdfZf/epkiL1ie15+mG4gYiJEkH6zR24t7k=,iv:YLZu6BIN/8LaYR84m3dNJwMFnmyaZptOhYCH0Hwckrk=,tag:ayAkLDan52fEbliNFbhUGw==,type:str] - diff --git a/vendor/go.mozilla.org/sops/example.json b/vendor/go.mozilla.org/sops/example.json deleted file mode 100644 index c80181954..000000000 --- a/vendor/go.mozilla.org/sops/example.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "firstName": "ENC[AES256_GCM,data:RwGNjw==,iv:nZqLELdF3m03J0udkBhhbT/ob2NxGv8LtuoInvilN4A=,tag:bln64G53ls2V+StjeoPDKg==,type:str]", - "lastName": "ENC[AES256_GCM,data:lUfbfis=,iv:LOKFqXfnOqcGMZ1J50tjHBcUazhVlnAn89kD+vYFzwE=,tag:NGQReEHn+XV49SVaAsfpSA==,type:str]", - "age": "ENC[AES256_GCM,data:If+rEA==,iv:2IoWeDsYgRyc83C08Hw1BMyDwVq2ybMDjLEYr6Yz9AA=,tag:IMU3jtJXQK+oyOxTmQpqFA==,type:float]", - "address": { - "city": "ENC[AES256_GCM,data:3Y42Y91x/ZQ=,iv:IQ/L3sd0ZbM3n+CH3o9TBBYwzLM/r83sBD/Mk3pZ2/A=,tag:xbVTKiDl699bnIGvB4j8xA==,type:str]", - "postalCode": "ENC[AES256_GCM,data:/QhEsQu7YWfguA==,iv:7lTIgG333LaEyBcoasevpLV3bHindHHPaoAU8gkTRz4=,tag:mwGWiTxdPIipcasTD0wUyA==,type:str]", - "state": "ENC[AES256_GCM,data:5II=,iv:hX/U4tlW3plzX78l5H6gN3ej/OD5Zi9TQM7FA7+xayg=,tag:wOpKTLMNmBzogZc4QNwo4Q==,type:str]", - "streetAddress": "ENC[AES256_GCM,data:yWSOIEzJEa5jxQL/0w==,iv:9iXFWOcHy5nc/7Qr+I38NAP4nyF1qiruK18bPFAQq4U=,tag:fAVNUdxLUObCplJl5mqROw==,type:str]" - }, - "phoneNumbers": [ - { - "number": "ENC[AES256_GCM,data:qfn79FKKwgeD1oxO,iv:j0RdoEFzHFI1u1goU8AKRCO0dxBZbvEhvNgVQueVsBg=,tag:RzheQ5/nGjjnw8iCLnpwFA==,type:str]", - "type": "ENC[AES256_GCM,data:M9V4UQ==,iv:K9fM2vFwttRoq+97oAcomxeAiR2u3peWlguk6JrHiUI=,tag:5dbFPjEPvVOnO8C5Y43a0A==,type:str]" - }, - { - "number": "ENC[AES256_GCM,data:LZIOa1sP4Epa1y/3,iv:8xXFqs4hxC0JoPrXIxim00sqYxW5bAF+u9AItq10xtA=,tag:9jDYNB+p2MB/XX/zH468zQ==,type:str]", - "type": "ENC[AES256_GCM,data:w8QcDV2m,iv:qfvYhtK1qvTTiX/AzGm6nTFHzne1DlVIkC8Xfkb8lDo=,tag:sdV1MDOEptfoyooTQHjC6g==,type:str]" - } - ], - "anEmptyValue": "", - "sops": { - "version": "1.6", - "mac": "ENC[AES256_GCM,data:YlhAZo7NAUHlRQzAPoPha12yl3nEaaq3lKyJ1hoMMtw9M7kts1cfrk301arKHrRAssqcqq1RizYVTyennOIj+TpOYoi8dOzXAtm+NrjQrb7SNosGLPXO2mbvOIoeqzZuzureHRCitNeLwzD8+U/vlGhRTuB6Be7TGt55CD+OMMM=,iv:x9DFpKn9xlq5uoRfbV2rnAsmjY4YyZH6wQnBY80x2dw=,tag:JnGt3eJNLRC3FamfNkAkew==,type:str]", - "pgp": [ - { - "fp": "1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A", - "enc": "-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v1\n\nhIwDEEVDpnzXnMABA/94OKD8wELXxP6gd+RdirSKvvBhtbyNMDHrkyju75OOqtgw\nnLLAmTq0sea2MZ5IdhhoudVLUbEHJqZDZyLD5KdKCGdS7psAEXjp7usQNNy0mp9G\nIXcjvM2RZArQYwkUYDHYzCbF1vI4W3zdsof8HV9DrJU3St1OzLNICnojYDrk4tJe\nAYKKHiIu3wq9NzOGaElsdpVj1yIAcVTXMU6fV76Z+fQmxQwtz3VDTzrNGcQzGMg5\nM2jkjHENFrYtNIJdobNGftnjcpzTruYxTMp0ysNuJPznqHH2jVO3XodRR0rLlw==\n=cBux\n-----END PGP MESSAGE-----\n", - "created_at": 
"2015-11-25T14:34:39Z" - }, - { - "fp": "85D77543B3D624B63CEA9E6DBC17301B491B3F21", - "created_at": "2015-11-25T14:34:39Z", - "enc": "-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v1\n\nhQIMA0t4uZHfl9qgAQ//d2ZWmkv6LbR4aIw8z4FFs4PyOMWqMS8jhFvmAcS7u0m3\ni1rsuUduD5+PM7EGJo8MibaQL1eaHIobeE6VhXDRkMWBt6vF8sk71H1+JZ6tdOyy\n7rxWFs7B2pn3WakbGlFqrF19gAqYKoUeK938eAika2SwZIFcRv5/nxv6+FZxwZJe\ngSieoGBlaDIHoLVRWCGHQSXAt+mItBJPSgP+jHwUGYvoi0IKUwIXqXQn+jWuFJC5\n3xLr8P3LaMNG9E/7492ZsVZ9F6pX3pNWD5cUd0ThaHrBaEDxWxH3OSfl2axbGTUb\n2+KnHZ274Omf1OzaXUqHPtE8aXcloESBr9jEZe7P8VWQ1PR8lDSSgjQtJ86NrVEx\nmAm4ENtTF/pb9l3IXacNpXZBrHm7JkA6kdDZFS8fF63FnAj23Efzks4IZmKLuRM7\ngFxv07xyKgPnEQKBbedM2XTk+PF3yI9JTTHNY57EIxzbc/V+16HYoC0tvwJ3KgpP\nBh3LcoroKwNaAMbCaLaeqmyIVmrpV0mLwneRlN3yKWdk7z6tP8ONb7T/mKtMjZIh\naStCZxs/Au+UUVt15vZb8w+qzBrfzKjad/+ilbGNu+/48oV13/J4Wq3xymdR5U0e\np2qiJp9X7/EB2ZSp7dKfbYDsIcUrOL8/94ygl1xm7GcwwJOnZ5pCeTz2sLVj5cPS\nXgFuzwS2S02vdHyE3NQ7Q//eX6sIPtb1+nrdIITkrtemUXwvgPL38+J+4xUtXTLC\nu3ZvfbehSYx3sKdRpuR789zK7+aOwbNqbZTFaMMj9eNQzhXl94cXMqCUrBl3Y+I=\n=LXE7\n-----END PGP MESSAGE-----\n" - } - ], - "lastmodified": "2016-02-03T18:46:24Z", - "attention": "This section contains key material that should only be modified with extra care. See `sops -h`." - } -} diff --git a/vendor/go.mozilla.org/sops/example.txt b/vendor/go.mozilla.org/sops/example.txt deleted file mode 100644 index 3ee315940..000000000 --- a/vendor/go.mozilla.org/sops/example.txt +++ /dev/null @@ -1,21 +0,0 @@ -{ - "data": "ENC[AES256_GCM,data:nk7oZA9rH+Dr/BZhz/RQpzqR3CnapvK11LOBejE794qBFbdZS+AK1BneRcYVTQfLy/L7bHcayh82wTEFImfa6TGwn7jIHvDEeOCqsMlu5fSyDaACMvbLQZl+93DRYGIZ/hShQqlyxC+xkywvcnUVzcJ7qOvi9DxrSVfo8CvGeMLl3WAatJpLm1FmMtvO7JyJZYHd2dNWa5pPo0s=,iv:h8efJaRPketLIRemE3/lz1O1a6DS20buXssw4SJOtKA=,tag:TaC8VAcXmE6T0+dAuhicSA==,type:str]", - "sops": { - "mac": "ENC[AES256_GCM,data:OQnRHfLfaxsMclFLTsNoog7JvEyMOBcXWQ/qGRmAPQmHW4pM2nzW1n8gdVcSGTA0AHz1Dgi3HW1r9E23z2Kf6K+D1JXSed+krs9BgrMO4Cc0mbPme+dYrCcOOYuKWmwlW6n9X+OiTzJ8tQvOJHGBGjs1+829F+Y2DZ1n5/62cHQ=,iv:gx99cyKW1IfA1q1rKvSRLBauntaAxwIta3Tqq6b8NL4=,tag:eTAYmXIkgHIdn/JKTIqm0w==,type:str]", - "version": 1.0, - "pgp": [ - { - "created_at": "2015-11-25T15:05:50Z", - "enc": "-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v1\n\nhIwDEEVDpnzXnMABBACKKUrfyxDq6mZxOsCaOtvu3+NamEHm8r3WTJt034ou6J4w\nPmww4tz8w+iCKRaVe5Bu0OTeh3zJkpvgnR+Qb8l+sDdm/PKZhGY0TCXfXndWa7TS\n60DBVnLr8J8MiqtF5SOrvkc74GVnxGIzePRQwBDCBASydFpwcwfdjcZRILehJNJc\nAXm4WL7aqUfaUpZIk7QhoVUPhaB3wLmiT/leTBIRzb52qKIjzB5oW4RgdnhoSK7e\nGeowLlxPKQujXPqncnPN/yctvPM1yokmGv3BkVi47CiVtZPFYvzsxYevn38=\n=SqMF\n-----END PGP MESSAGE-----\n", - "fp": "1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A" - }, - { - "created_at": "2015-11-25T15:05:50Z", - "enc": "-----BEGIN PGP MESSAGE-----\nVersion: GnuPG 
v1\n\nhQIMA0t4uZHfl9qgAQ//Yb/uQ1hP1VY+53rHZZtW/gtC5jZB/QVY7hi3KoacHPJ8\nvySBFZnp0ZZAZf3ACWkQZZiPXvwA9cFFaxocy7KljNQ1othEgEzQC6+gxEzKmjnO\nizAqS01vubs4KiACQza7H9XXj5sQrq6+lmSNOlMWEtzBYkGjXzB+krWNPrm7EvKz\nvJtsjX0WTiii7y1vKcpKJJuX8W6Yw25IKlOawIqtrL9ATsBHvx+WgP3F4wVwAD8k\n/1RFezv/EXXEd27VcSR0QePUp2afSQq7fcmYMWkFIToZ8XOc+51uEhB6L5BcFqH2\nIBbLHQIS0fBv6pVUQjJg+W9Qs3gKo6fST7f12J0SGs3jStQ37k/vTGFocYy2FQcL\nSSAzYIMhbexp/CtnM5OH4NquGywsiACN9oIvsdZl1yJ85dN8JwRWqVPfSQSTYk55\ndMkyX6dPJFE9M/6EngbZSnHmaIDp86mwAlMftLnwm5ArjHS7wTTolHCeTsNc8nsf\nW0r1bcTOWx0+V3HhOsKvHPRnADr6YBf/BScp/CywMht75UpnwA3l0ei52a6OT23v\nPk/wicuduSB3gDxC+i3IIxLxJv0GC/QTqxt0/rJAYJBIe5S4EVgizLU9uCYI7jXk\nyl3sha+4cI5GjBsgQlurk72ccJHcCswEhePkIBszi+DqHXv2ki3SAuNUSXYkY7/S\nXAGdxAXfg5VgIVALqFYSO+PD33C+oKL/q2LDhiV87wU3VNjPGt0ZOGnFiir3twE6\nTWZ5dh/kl4LitajXEKuS+127S2pPcdAICEth8wLE9KrkUjEBx7HdgRM3VXGH\n=lK8+\n-----END PGP MESSAGE-----\n", - "fp": "85D77543B3D624B63CEA9E6DBC17301B491B3F21" - } - ], - "lastmodified": "2015-11-25T15:05:50Z", - "attention": "This section contains key material that should only be modified with extra care. See `sops -h`." - } -} diff --git a/vendor/go.mozilla.org/sops/example.yaml b/vendor/go.mozilla.org/sops/example.yaml deleted file mode 100644 index 35a477e8a..000000000 --- a/vendor/go.mozilla.org/sops/example.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# The secrets below are unreadable without access to one of the sops master key -myapp1: ENC[AES256_GCM,data:QsGJGjvQOpoVCIlrYTcOQEfQzriw,iv:ShmgdRNV6UrOJ22Rgr7habB74Nd/YFxU4lDh6jy6n+8=,tag:8GT6U8lzrI27DcFc1+icgQ==,type:str] -app2: - db: - user: ENC[AES256_GCM,data:Arbb,iv:7bjm4ZaVFlxNk3O4M1P67TqfFtXTOHOe5x9rjF6/R9o=,tag:d4+O8BUj+02qaeJorev2ww==,type:str] - password: ENC[AES256_GCM,data:9/jSxNCq0A==,iv:5mk+GS016hKGj6gVfQDMSyuuPy7/SVHLsqQXK3p1nds=,tag:AtK4nPFoSOOgdw6IZmiZmw==,type:str] - # private key for secret operations in app2 - key: |- - ENC[AES256_GCM,data:UFSoBpaS7n5nipCTZeIA9HCsW619k0FO2/xKqu7eU4cMOHHrvk5fCbEAdXpz9HLiDTtXuRgA2ZdMSfD9X/mqHC3x0BNoFUtdpy7ZdPHUKiMgZEcI9lqUxEIREa9RU6thjTp0x5owxvyv4I9KqtSWFIJOhxwR1tjEGe0W+ErdXCXoI8D8/cVWDnMIFSjER1ks3dcgsldaaaV5ahUK/EmP/RqZhf1f0VEgd1+dZKO2fAjLX5kLEYDn2hkAfJZWfKzcpcFFWijeS/AYtyRnAV5eAv0R8k8vTm7w5kOMix4bJgqZ8HnouJ1sxl0H13TktLjshDftpybVfKRZ9ynOOit8nj6PRIOICdc/+gPSg7JLjEP57Q4EKctUeljFAjcyfan9mJljznXUeAJodO2lJup5QaNTXDTAC9KsRn1g2F05TUAxoEJGkli4zPK1EtuO4YwoajNCIW+s/3cjS+1me3gofHu4X6fkW3OxofboFTamO5BFQWd/A6e/DMipz5jcFqTGs8T108uPAabomoshDCpZGGYism2FrzpQHChkQHtv2387JP8/9fQI6GaHalrtXD3rg9W9T80+u3Z2HhkVdyusa/yWXnEanJi8G7uWq+9DpR3svub+Rf8EZYVQHBejjyP9Zl6fkytWbWDDtA4JlIdPnkU=,iv:oLuu8Xnv0AGS02t/eFRsZ+WHB/enNPDErlIxb4tAVh8=,tag:u9d4iOnDOENzWmm7hdg7Sg==,type:str] -number: ENC[AES256_GCM,data:KIpKMuwET3zDczZQ+w==,iv:ocf+UunCIQAbZsZzeDmT4BljsSb7F6ybQ26D9AViR2k=,tag:tUmZy0ZPCyKgwasePeZelw==,type:float] -an_array: -- ENC[AES256_GCM,data:An4qJsfBO1bVAZo=,iv:swgh9CSBihQf4JnLLKVFsT2TPyKok6MY0Uet//nAK1k=,tag:4mrt6IKFWjuEIbm6gylo7Q==,type:str] -- ENC[AES256_GCM,data:xakhro9jY0kNqpc=,iv:hucFzENuWLRK15IK3mbBELE8+eZWoSfgW724Gi7yWCU=,tag:YSFJcTFLRTJCCb6h3TLb2Q==,type:str] -- ENC[AES256_GCM,data:aGXaMsUIQBAMqutjqZPtU2hzwInryp7zao33Vt7JPY20S8eNFplGfyugRHlWbLTPQ5RHjYoPrQAyUQ==,iv:J4srvF83nPbkXKu674gINReMJasUppW4osTi/HWTGXs=,tag:g2pUXrfP5ZjA/0oYJ4yViA==,type:str] -- ENC[AES256_GCM,data:nLmw6dwybYVA65FXDbgD8Q==,iv:E047Yxv3tlwKIDrg2rm0Yng3DIdmqOPKlukcyLSsqO0=,tag:oCtYybAn4SnlpVAdwKOLnQ==,type:str] -somebooleans: -- ENC[AES256_GCM,data:LZkyvg==,iv:a9QepfteG4ZWipwWEnb3JRDztHCWNNxdbfC6L2op0dM=,tag:CY1rv9Nntbz2pMMz/A9OvQ==,type:bool] -- 
ENC[AES256_GCM,data:iKPW0nc=,iv:shJr4plRt/YJ0HfAl3HY86LXq/3FUgIDMLBqpddu5wA=,tag:L3IwlNRPcZiarn7YWn2dLQ==,type:bool] -this: - is: - a: - nested: - value: ENC[AES256_GCM,data:96iQFcKdmKcocHCnOm7MR78W7uFZPGoZWRyH,iv:AQ3HwSFXhP3Mx4PoLvsyb9fwsYRaQZsV3NRH5dGhrXw=,tag:l6KHQfmm/QbnmPdLvCfocQ==,type:str] - -# sops supports unencrypted fields -# by adding the `_unencrypted` suffix -# to any key -somelist_unencrypted: -- all elements of this list -- remain in clear text -- because of the _encrypted suffix in the key -nested_unencrypted: - this: - is: - all: going to remain in clear text -sops: - pgp: - - fp: 1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A - created_at: '2015-11-25T00:32:57Z' - enc: | - -----BEGIN PGP MESSAGE----- - Version: GnuPG v1 - - hIwDEEVDpnzXnMABBACBf7lGw8B0sLbfup1Ye51FNpY6iF/4SPTdjeV4OB3uDwIJ - FRa6z7VR+FrtWyyNYRNB2Wm5eegnEEWwui6hFw7tvlhkN8C5hWQ0B47oYMTstZDR - TR3Eu7y70u3YLoQKZgDnPb6hQplGIoYVd/EMpDgKmKnmz5oCiIkEI68T3aXo5tJc - AZhplIlk9eSMHIW9CmGkNp5HtZlQWzVSdGdcQcIUBG4F+Vf40max9u0Jkk1Se1do - BJ+D4Kl5dZXBj3njvo4YdZ+FGoYPfMlX1GCw0W4caUu6tD8RjuzJA+fYo2Q= - =Cnu4 - -----END PGP MESSAGE----- - - fp: 85D77543B3D624B63CEA9E6DBC17301B491B3F21 - created_at: '2015-11-25T00:32:57Z' - enc: | - -----BEGIN PGP MESSAGE----- - Version: GnuPG v1 - - hQIMA0t4uZHfl9qgAQ/8Da1b/hWg6wv8ZoieIv/AlTp2Qa45dmP0yUO38vu3yn+8 - pl5YobTljaSW9EJ0rpR70/9QW7/EveJ81XpjEI4OFUb5c8OF/gz2IEW8B7Gwbi1B - joeqU/H/u2PYTnAfB4dP8g99sd0s8o6RQNQDHimlczh9/QTcVYogHBeaNnpfavpm - 128b0zkG0wH3BRYKwRIhk/506H4XFSOqSkN8x1p9UrN481O9vNP2XnCiTBLAWzHY - 6En8WUrHh0jLMAQ3MaYZCa+x3Dp+rfobqQA/4tvqA3Ai1duNfWNMk6p43kq8xErC - yIX0Kb0RKz3Sy7HxP2LvF1HfKWMkZ9wsleJAhJ+ChvCTsKL2PTYoRPtLVbCXi/Cn - Y52aeMM9KbauXMq179Kb36HHhCGE0Ad1nBBcLxJ+TY6B98jt+YthmPe8GUMDyXKl - fimpQ1qES2CE1YHnryfr/rlSl81VVOMisKW1jEBmqpMNw1Y3YMjBEWfsouQuX0rV - ywb7G2vHC/OLV4gsgaPgHUDU3Mp4cYoel0YuffHaXFlkDiqU+T96l0T485QR+BCf - F9YR3ZDYoHryWIqQYtz910KmPWUXX/h54ro7/8Rngt4DoB9dJ2PG6apa+VZqW9/5 - 4yEvv+CREkFjzjjGRqy1GOxVmombSETo+XQiQS4pj37JwmtgscaEW9hbKDU8twzS - XAGgfDxo0DklfEKFkccH8G40SS9bD1ilNVoOU513lZF8X21ZDm+fP5MyOU45pRYT - H6JUTisfwKa2t319jR0cfy81dMxUjwTAdNBOiE0nj+Iz0i3ekBIl/wmtVWpJ - =dWBE - -----END PGP MESSAGE----- - unencrypted_suffix: _unencrypted - mac: ENC[AES256_GCM,data:p9Jn/KVtp9NEQK39XLcr6Lw7cgLX2A23SAZsCyhdj88+aNkAIavzJMNNPD3z2dOpqJfpccdwEX3p5rfY6xxoQHpLjbbPOi4J2ViYUZ9NFM4lFTtKdmaB/Kugr7lNxsNw+lWB/UjBQvjp+OBfDUr3l4ZGegaN94wAiPgur+tqXpw=,iv:PDW1eTyPwR4VY/5xugSawMrfhFNdVVYVsTaVpmCTxsY=,tag:VzVKQWa/K49I5mjBCfRBQQ==,type:str] - lastmodified: '2016-03-16T23:34:46Z' - version: 1.7 - attention: This section contains key material that should only be modified with - extra care. See `sops -h`. diff --git a/vendor/go.mozilla.org/sops/gcpkms/keysource.go b/vendor/go.mozilla.org/sops/gcpkms/keysource.go deleted file mode 100644 index 0474c7f2a..000000000 --- a/vendor/go.mozilla.org/sops/gcpkms/keysource.go +++ /dev/null @@ -1,153 +0,0 @@ -package gcpkms //import "go.mozilla.org/sops/gcpkms" - -import ( - "encoding/base64" - "fmt" - "regexp" - "strings" - "time" - - "go.mozilla.org/sops/logging" - - "golang.org/x/net/context" - "golang.org/x/oauth2/google" - - "github.com/sirupsen/logrus" - cloudkms "google.golang.org/api/cloudkms/v1" -) - -var log *logrus.Logger - -func init() { - log = logging.NewLogger("GCPKMS") -} - -// MasterKey is a GCP KMS key used to encrypt and decrypt sops' data key. 
-type MasterKey struct { - ResourceID string - EncryptedKey string - CreationDate time.Time -} - -// EncryptedDataKey returns the encrypted data key this master key holds -func (key *MasterKey) EncryptedDataKey() []byte { - return []byte(key.EncryptedKey) -} - -// SetEncryptedDataKey sets the encrypted data key for this master key -func (key *MasterKey) SetEncryptedDataKey(enc []byte) { - key.EncryptedKey = string(enc) -} - -// Encrypt takes a sops data key, encrypts it with GCP KMS and stores the result in the EncryptedKey field -func (key *MasterKey) Encrypt(dataKey []byte) error { - cloudkmsService, err := key.createCloudKMSService() - if err != nil { - log.WithField("resourceID", key.ResourceID).Info("Encryption failed") - return fmt.Errorf("Cannot create GCP KMS service: %v", err) - } - req := &cloudkms.EncryptRequest{ - Plaintext: base64.StdEncoding.EncodeToString(dataKey), - } - resp, err := cloudkmsService.Projects.Locations.KeyRings.CryptoKeys.Encrypt(key.ResourceID, req).Do() - if err != nil { - log.WithField("resourceID", key.ResourceID).Info("Encryption failed") - return fmt.Errorf("Failed to call GCP KMS encryption service: %v", err) - } - log.WithField("resourceID", key.ResourceID).Info("Encryption succeeded") - key.EncryptedKey = resp.Ciphertext - return nil -} - -// EncryptIfNeeded encrypts the provided sops' data key and encrypts it if it hasn't been encrypted yet -func (key *MasterKey) EncryptIfNeeded(dataKey []byte) error { - if key.EncryptedKey == "" { - return key.Encrypt(dataKey) - } - return nil -} - -// Decrypt decrypts the EncryptedKey field with CGP KMS and returns the result. -func (key *MasterKey) Decrypt() ([]byte, error) { - cloudkmsService, err := key.createCloudKMSService() - if err != nil { - log.WithField("resourceID", key.ResourceID).Info("Decryption failed") - return nil, fmt.Errorf("Cannot create GCP KMS service: %v", err) - } - - req := &cloudkms.DecryptRequest{ - Ciphertext: key.EncryptedKey, - } - resp, err := cloudkmsService.Projects.Locations.KeyRings.CryptoKeys.Decrypt(key.ResourceID, req).Do() - if err != nil { - log.WithField("resourceID", key.ResourceID).Info("Decryption failed") - return nil, fmt.Errorf("Error decrypting key: %v", err) - } - encryptedKey, err := base64.StdEncoding.DecodeString(resp.Plaintext) - if err != nil { - log.WithField("resourceID", key.ResourceID).Info("Decryption failed") - return nil, err - } - log.WithField("resourceID", key.ResourceID).Info("Decryption succeeded") - return encryptedKey, nil -} - -// NeedsRotation returns whether the data key needs to be rotated or not. 
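A minimal usage sketch for this gcpkms key source, assuming a placeholder Cloud KMS resource ID and application-default Google credentials at runtime:

```go
package main

import (
	"fmt"
	"log"

	"go.mozilla.org/sops/gcpkms" // old import path removed by this patch
)

func main() {
	// Resource ID format checked by createCloudKMSService:
	// projects/{project}/locations/{location}/keyRings/{ring}/cryptoKeys/{key}
	// (every segment below is a placeholder).
	key := gcpkms.NewMasterKeyFromResourceID(
		"projects/example-project/locations/global/keyRings/example-ring/cryptoKeys/example-key")
	// Encrypt calls Cloud KMS through google.DefaultClient, so
	// application-default credentials must be available when this runs.
	if err := key.Encrypt([]byte("example data key")); err != nil {
		log.Fatal(err)
	}
	plaintext, err := key.Decrypt()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped a %d-byte data key for %s\n", len(plaintext), key.ToString())
}
```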
-func (key *MasterKey) NeedsRotation() bool { - return time.Since(key.CreationDate) > (time.Hour * 24 * 30 * 6) -} - -// ToString converts the key to a string representation -func (key *MasterKey) ToString() string { - return key.ResourceID -} - -// NewMasterKeyFromResourceID takes a GCP KMS resource ID string and returns a new MasterKey for that -func NewMasterKeyFromResourceID(resourceID string) *MasterKey { - k := &MasterKey{} - resourceID = strings.Replace(resourceID, " ", "", -1) - k.ResourceID = resourceID - k.CreationDate = time.Now().UTC() - return k -} - -// MasterKeysFromResourceIDString takes a comma separated list of GCP KMS resource IDs and returns a slice of new MasterKeys for them -func MasterKeysFromResourceIDString(resourceID string) []*MasterKey { - var keys []*MasterKey - if resourceID == "" { - return keys - } - for _, s := range strings.Split(resourceID, ",") { - keys = append(keys, NewMasterKeyFromResourceID(s)) - } - return keys -} - -func (key MasterKey) createCloudKMSService() (*cloudkms.Service, error) { - re := regexp.MustCompile(`^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$`) - matches := re.FindStringSubmatch(key.ResourceID) - if matches == nil { - return nil, fmt.Errorf("No valid resourceId found in %q", key.ResourceID) - } - - ctx := context.Background() - client, err := google.DefaultClient(ctx, cloudkms.CloudPlatformScope) - if err != nil { - return nil, err - } - - cloudkmsService, err := cloudkms.New(client) - if err != nil { - return nil, err - } - return cloudkmsService, nil -} - -// ToMap converts the MasterKey to a map for serialization purposes -func (key MasterKey) ToMap() map[string]interface{} { - out := make(map[string]interface{}) - out["resource_id"] = key.ResourceID - out["enc"] = key.EncryptedKey - out["created_at"] = key.CreationDate.UTC().Format(time.RFC3339) - return out -} diff --git a/vendor/go.mozilla.org/sops/keyservice/keyservice.pb.go b/vendor/go.mozilla.org/sops/keyservice/keyservice.pb.go deleted file mode 100644 index d39c654ab..000000000 --- a/vendor/go.mozilla.org/sops/keyservice/keyservice.pb.go +++ /dev/null @@ -1,555 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: keyservice/keyservice.proto - -/* -Package keyservice is a generated protocol buffer package. - -It is generated from these files: - keyservice/keyservice.proto - -It has these top-level messages: - Key - PgpKey - KmsKey - GcpKmsKey - AzureKeyVaultKey - EncryptRequest - EncryptResponse - DecryptRequest - DecryptResponse -*/ -package keyservice - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Key struct { - // Types that are valid to be assigned to KeyType: - // *Key_KmsKey - // *Key_PgpKey - // *Key_GcpKmsKey - // *Key_AzureKeyvaultKey - KeyType isKey_KeyType `protobuf_oneof:"key_type"` -} - -func (m *Key) Reset() { *m = Key{} } -func (m *Key) String() string { return proto.CompactTextString(m) } -func (*Key) ProtoMessage() {} -func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isKey_KeyType interface { - isKey_KeyType() -} - -type Key_KmsKey struct { - KmsKey *KmsKey `protobuf:"bytes,1,opt,name=kms_key,json=kmsKey,oneof"` -} -type Key_PgpKey struct { - PgpKey *PgpKey `protobuf:"bytes,2,opt,name=pgp_key,json=pgpKey,oneof"` -} -type Key_GcpKmsKey struct { - GcpKmsKey *GcpKmsKey `protobuf:"bytes,3,opt,name=gcp_kms_key,json=gcpKmsKey,oneof"` -} -type Key_AzureKeyvaultKey struct { - AzureKeyvaultKey *AzureKeyVaultKey `protobuf:"bytes,4,opt,name=azure_keyvault_key,json=azureKeyvaultKey,oneof"` -} - -func (*Key_KmsKey) isKey_KeyType() {} -func (*Key_PgpKey) isKey_KeyType() {} -func (*Key_GcpKmsKey) isKey_KeyType() {} -func (*Key_AzureKeyvaultKey) isKey_KeyType() {} - -func (m *Key) GetKeyType() isKey_KeyType { - if m != nil { - return m.KeyType - } - return nil -} - -func (m *Key) GetKmsKey() *KmsKey { - if x, ok := m.GetKeyType().(*Key_KmsKey); ok { - return x.KmsKey - } - return nil -} - -func (m *Key) GetPgpKey() *PgpKey { - if x, ok := m.GetKeyType().(*Key_PgpKey); ok { - return x.PgpKey - } - return nil -} - -func (m *Key) GetGcpKmsKey() *GcpKmsKey { - if x, ok := m.GetKeyType().(*Key_GcpKmsKey); ok { - return x.GcpKmsKey - } - return nil -} - -func (m *Key) GetAzureKeyvaultKey() *AzureKeyVaultKey { - if x, ok := m.GetKeyType().(*Key_AzureKeyvaultKey); ok { - return x.AzureKeyvaultKey - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
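The generated oneof plumbing is easier to follow next to a small client sketch; the listen address is an assumption (wherever a `sops keyservice` server happens to be listening), and the fingerprint is the test key used by the example files in this vendor tree:

```go
package main

import (
	"context"
	"log"

	"go.mozilla.org/sops/keyservice" // old import path removed by this patch
	"google.golang.org/grpc"
)

func main() {
	// The address is an assumption: point it at a running key service.
	conn, err := grpc.Dial("127.0.0.1:5000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := keyservice.NewKeyServiceClient(conn)
	// The Key oneof selects which master key variant handles the request;
	// here a PGP key is addressed by fingerprint.
	req := &keyservice.EncryptRequest{
		Key: &keyservice.Key{
			KeyType: &keyservice.Key_PgpKey{
				PgpKey: &keyservice.PgpKey{Fingerprint: "1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A"},
			},
		},
		Plaintext: []byte("example data key"),
	}
	resp, err := client.Encrypt(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("encrypted data key is %d bytes", len(resp.Ciphertext))
}
```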
-func (*Key) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Key_OneofMarshaler, _Key_OneofUnmarshaler, _Key_OneofSizer, []interface{}{ - (*Key_KmsKey)(nil), - (*Key_PgpKey)(nil), - (*Key_GcpKmsKey)(nil), - (*Key_AzureKeyvaultKey)(nil), - } -} - -func _Key_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Key) - // key_type - switch x := m.KeyType.(type) { - case *Key_KmsKey: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.KmsKey); err != nil { - return err - } - case *Key_PgpKey: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PgpKey); err != nil { - return err - } - case *Key_GcpKmsKey: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.GcpKmsKey); err != nil { - return err - } - case *Key_AzureKeyvaultKey: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.AzureKeyvaultKey); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Key.KeyType has unexpected type %T", x) - } - return nil -} - -func _Key_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Key) - switch tag { - case 1: // key_type.kms_key - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(KmsKey) - err := b.DecodeMessage(msg) - m.KeyType = &Key_KmsKey{msg} - return true, err - case 2: // key_type.pgp_key - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PgpKey) - err := b.DecodeMessage(msg) - m.KeyType = &Key_PgpKey{msg} - return true, err - case 3: // key_type.gcp_kms_key - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GcpKmsKey) - err := b.DecodeMessage(msg) - m.KeyType = &Key_GcpKmsKey{msg} - return true, err - case 4: // key_type.azure_keyvault_key - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(AzureKeyVaultKey) - err := b.DecodeMessage(msg) - m.KeyType = &Key_AzureKeyvaultKey{msg} - return true, err - default: - return false, nil - } -} - -func _Key_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Key) - // key_type - switch x := m.KeyType.(type) { - case *Key_KmsKey: - s := proto.Size(x.KmsKey) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Key_PgpKey: - s := proto.Size(x.PgpKey) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Key_GcpKmsKey: - s := proto.Size(x.GcpKmsKey) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Key_AzureKeyvaultKey: - s := proto.Size(x.AzureKeyvaultKey) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PgpKey struct { - Fingerprint string `protobuf:"bytes,1,opt,name=fingerprint" json:"fingerprint,omitempty"` -} - -func (m *PgpKey) Reset() { *m = PgpKey{} } -func (m *PgpKey) String() string { return proto.CompactTextString(m) } -func (*PgpKey) ProtoMessage() {} -func (*PgpKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *PgpKey) GetFingerprint() string { - if m != nil { - return m.Fingerprint - } - return "" -} - -type KmsKey struct { - Arn string 
`protobuf:"bytes,1,opt,name=arn" json:"arn,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role" json:"role,omitempty"` - Context map[string]string `protobuf:"bytes,3,rep,name=context" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - AwsProfile string `protobuf:"bytes,4,opt,name=aws_profile" json:"aws_profile,omitempty"` -} - -func (m *KmsKey) Reset() { *m = KmsKey{} } -func (m *KmsKey) String() string { return proto.CompactTextString(m) } -func (*KmsKey) ProtoMessage() {} -func (*KmsKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *KmsKey) GetArn() string { - if m != nil { - return m.Arn - } - return "" -} - -func (m *KmsKey) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *KmsKey) GetContext() map[string]string { - if m != nil { - return m.Context - } - return nil -} - -type GcpKmsKey struct { - ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId" json:"resource_id,omitempty"` -} - -func (m *GcpKmsKey) Reset() { *m = GcpKmsKey{} } -func (m *GcpKmsKey) String() string { return proto.CompactTextString(m) } -func (*GcpKmsKey) ProtoMessage() {} -func (*GcpKmsKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *GcpKmsKey) GetResourceId() string { - if m != nil { - return m.ResourceId - } - return "" -} - -type AzureKeyVaultKey struct { - VaultUrl string `protobuf:"bytes,1,opt,name=vault_url,json=vaultUrl" json:"vault_url,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"` -} - -func (m *AzureKeyVaultKey) Reset() { *m = AzureKeyVaultKey{} } -func (m *AzureKeyVaultKey) String() string { return proto.CompactTextString(m) } -func (*AzureKeyVaultKey) ProtoMessage() {} -func (*AzureKeyVaultKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *AzureKeyVaultKey) GetVaultUrl() string { - if m != nil { - return m.VaultUrl - } - return "" -} - -func (m *AzureKeyVaultKey) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AzureKeyVaultKey) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -type EncryptRequest struct { - Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Plaintext []byte `protobuf:"bytes,2,opt,name=plaintext,proto3" json:"plaintext,omitempty"` -} - -func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } -func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } -func (*EncryptRequest) ProtoMessage() {} -func (*EncryptRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *EncryptRequest) GetKey() *Key { - if m != nil { - return m.Key - } - return nil -} - -func (m *EncryptRequest) GetPlaintext() []byte { - if m != nil { - return m.Plaintext - } - return nil -} - -type EncryptResponse struct { - Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` -} - -func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } -func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } -func (*EncryptResponse) ProtoMessage() {} -func (*EncryptResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *EncryptResponse) GetCiphertext() []byte { - if m != nil { - return m.Ciphertext - } - return nil -} - -type DecryptRequest struct { - Key *Key `protobuf:"bytes,1,opt,name=key" 
json:"key,omitempty"` - Ciphertext []byte `protobuf:"bytes,2,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` -} - -func (m *DecryptRequest) Reset() { *m = DecryptRequest{} } -func (m *DecryptRequest) String() string { return proto.CompactTextString(m) } -func (*DecryptRequest) ProtoMessage() {} -func (*DecryptRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *DecryptRequest) GetKey() *Key { - if m != nil { - return m.Key - } - return nil -} - -func (m *DecryptRequest) GetCiphertext() []byte { - if m != nil { - return m.Ciphertext - } - return nil -} - -type DecryptResponse struct { - Plaintext []byte `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` -} - -func (m *DecryptResponse) Reset() { *m = DecryptResponse{} } -func (m *DecryptResponse) String() string { return proto.CompactTextString(m) } -func (*DecryptResponse) ProtoMessage() {} -func (*DecryptResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *DecryptResponse) GetPlaintext() []byte { - if m != nil { - return m.Plaintext - } - return nil -} - -func init() { - proto.RegisterType((*Key)(nil), "Key") - proto.RegisterType((*PgpKey)(nil), "PgpKey") - proto.RegisterType((*KmsKey)(nil), "KmsKey") - proto.RegisterType((*GcpKmsKey)(nil), "GcpKmsKey") - proto.RegisterType((*AzureKeyVaultKey)(nil), "AzureKeyVaultKey") - proto.RegisterType((*EncryptRequest)(nil), "EncryptRequest") - proto.RegisterType((*EncryptResponse)(nil), "EncryptResponse") - proto.RegisterType((*DecryptRequest)(nil), "DecryptRequest") - proto.RegisterType((*DecryptResponse)(nil), "DecryptResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for KeyService service - -type KeyServiceClient interface { - Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) - Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) -} - -type keyServiceClient struct { - cc *grpc.ClientConn -} - -func NewKeyServiceClient(cc *grpc.ClientConn) KeyServiceClient { - return &keyServiceClient{cc} -} - -func (c *keyServiceClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { - out := new(EncryptResponse) - err := grpc.Invoke(ctx, "/KeyService/Encrypt", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *keyServiceClient) Decrypt(ctx context.Context, in *DecryptRequest, opts ...grpc.CallOption) (*DecryptResponse, error) { - out := new(DecryptResponse) - err := grpc.Invoke(ctx, "/KeyService/Decrypt", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for KeyService service - -type KeyServiceServer interface { - Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) - Decrypt(context.Context, *DecryptRequest) (*DecryptResponse, error) -} - -func RegisterKeyServiceServer(s *grpc.Server, srv KeyServiceServer) { - s.RegisterService(&_KeyService_serviceDesc, srv) -} - -func _KeyService_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncryptRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KeyServiceServer).Encrypt(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/KeyService/Encrypt", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KeyServiceServer).Encrypt(ctx, req.(*EncryptRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KeyService_Decrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DecryptRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KeyServiceServer).Decrypt(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/KeyService/Decrypt", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KeyServiceServer).Decrypt(ctx, req.(*DecryptRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _KeyService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "KeyService", - HandlerType: (*KeyServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Encrypt", - Handler: _KeyService_Encrypt_Handler, - }, - { - MethodName: "Decrypt", - Handler: _KeyService_Decrypt_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "keyservice/keyservice.proto", -} - -func init() { proto.RegisterFile("keyservice/keyservice.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd4, 0x30, - 0x10, 0x6d, 0x36, 0xcb, 0x6e, 0x33, 0xa9, 0xba, 0xc1, 0xaa, 0xd0, 0x6a, 0x8b, 0x60, 0xe5, 0x53, - 0x85, 0x2a, 0x57, 0x2c, 0x17, 0xd4, 0x5b, 0x81, 0x42, 0x51, 0x2e, 0x28, 0x08, 0x6e, 0x68, 0x15, - 0xd2, 0x21, 0x44, 0xc9, 0x26, 0xc6, 0x71, 0x22, 0xcc, 0x4f, 0xe1, 0x27, 0xf1, 0xab, 0x90, 0x1d, - 0x27, 0xfb, 0xc1, 0x85, 0xdb, 0xf8, 0xf9, 0xcd, 0x9b, 0x79, 0xcf, 0x32, 0x9c, 0xe7, 0xa8, 0x6a, - 0x14, 0x6d, 0x96, 0xe0, 0xd5, 0xb6, 0x64, 0x5c, 0x54, 0xb2, 0xa2, 0x7f, 0x1c, 0x70, 0x43, 0x54, - 0x84, 0xc2, 0x34, 0xdf, 0xd4, 0xeb, 0x1c, 0xd5, 0xdc, 0x59, 0x3a, 0x17, 0xfe, 0x6a, 0xca, 0xc2, - 0x4d, 0x1d, 0xa2, 0xba, 0x3b, 0x8a, 0x26, 0xb9, 0xa9, 0x34, 0x87, 0xa7, 0xdc, 0x70, 0x46, 0x96, - 0xf3, 0x21, 0xe5, 0x96, 0xc3, 0x4d, 0x45, 0x2e, 0xc1, 0x4f, 0x13, 0xbe, 0xee, 0xb5, 0x5c, 0xc3, - 0x03, 0xf6, 0x2e, 0xe1, 0x83, 0x9c, 0x97, 0xf6, 0x07, 0x72, 0x03, 0x24, 0xfe, 0xd5, 0x08, 0xd4, - 0xdc, 0x36, 0x6e, 0x0a, 0x69, 0x9a, 0xc6, 0xa6, 0xe9, 0x21, 0xbb, 0xd1, 0x57, 0x21, 0xaa, 0xcf, - 0xfa, 0xa6, 0xeb, 0x0d, 0x62, 0x8b, 0xb5, 0x16, 0x7b, 0x05, 0x70, 0x9c, 0xa3, 0x5a, 0x4b, 0xc5, - 0x91, 0x3e, 0x83, 0x49, 0xb7, 0x10, 0x59, 0x82, 0xff, 0x2d, 0x2b, 0x53, 0x14, 0x5c, 0x64, 0xa5, - 0x34, 0x96, 0xbc, 0x68, 0x17, 0xa2, 0xbf, 0x1d, 0x98, 0xd8, 0x2d, 0x02, 0x70, 0x63, 
0x51, 0x5a, - 0x92, 0x2e, 0x09, 0x81, 0xb1, 0xa8, 0x0a, 0x34, 0x36, 0xbd, 0xc8, 0xd4, 0x84, 0xc1, 0x34, 0xa9, - 0x4a, 0x89, 0x3f, 0xe5, 0xdc, 0x5d, 0xba, 0x17, 0xfe, 0xea, 0xcc, 0x26, 0xc4, 0x5e, 0x77, 0xf0, - 0x6d, 0x29, 0x85, 0x8a, 0x7a, 0xd2, 0xe2, 0x1a, 0x4e, 0x76, 0x2f, 0xf4, 0x94, 0x3e, 0x5d, 0x2f, - 0xd2, 0x25, 0x39, 0x83, 0x07, 0x6d, 0x5c, 0x34, 0xfd, 0x98, 0xee, 0x70, 0x3d, 0x7a, 0xe9, 0xd0, - 0x4b, 0xf0, 0x86, 0xc4, 0xc8, 0x53, 0xf0, 0x05, 0xd6, 0x55, 0x23, 0x12, 0x5c, 0x67, 0xf7, 0x56, - 0x00, 0x7a, 0xe8, 0xfd, 0x3d, 0xfd, 0x02, 0xc1, 0x61, 0x54, 0xe4, 0x1c, 0xbc, 0x2e, 0xd0, 0x46, - 0x14, 0xb6, 0xe5, 0xd8, 0x00, 0x9f, 0x44, 0xa1, 0xed, 0x95, 0xf1, 0x66, 0xb0, 0xa7, 0x6b, 0x32, - 0x87, 0x69, 0x8b, 0xa2, 0xce, 0xaa, 0xd2, 0x3c, 0x9a, 0x17, 0xf5, 0x47, 0xfa, 0x16, 0x4e, 0x6f, - 0xcb, 0x44, 0x28, 0x2e, 0x23, 0xfc, 0xd1, 0x60, 0x2d, 0xc9, 0xa3, 0xad, 0x15, 0x7f, 0x35, 0x66, - 0x21, 0xaa, 0xce, 0xd0, 0x63, 0xf0, 0x78, 0x11, 0x67, 0x5d, 0x48, 0x5a, 0xfc, 0x24, 0xda, 0x02, - 0xf4, 0x39, 0xcc, 0x06, 0x9d, 0x9a, 0x57, 0x65, 0x8d, 0xe4, 0x09, 0x40, 0x92, 0xf1, 0xef, 0x28, - 0x4c, 0x87, 0x63, 0x3a, 0x76, 0x10, 0x7a, 0x07, 0xa7, 0x6f, 0xf0, 0xbf, 0x46, 0xef, 0x2b, 0x8d, - 0xfe, 0x51, 0xba, 0x82, 0xd9, 0xa0, 0x64, 0x87, 0xef, 0x6d, 0xeb, 0x1c, 0x6c, 0xbb, 0x2a, 0x00, - 0x42, 0x54, 0x1f, 0xbb, 0xcf, 0xa2, 0x1f, 0xdf, 0xee, 0x4e, 0x66, 0x6c, 0x3f, 0x8d, 0x45, 0xc0, - 0x0e, 0x6c, 0xd1, 0x23, 0xcd, 0xb7, 0xe3, 0xc8, 0x8c, 0xed, 0x5b, 0x58, 0x04, 0xec, 0x60, 0x13, - 0x7a, 0xf4, 0x75, 0x62, 0x7e, 0xe3, 0x8b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x29, 0x21, - 0x4e, 0xac, 0x03, 0x00, 0x00, -} diff --git a/vendor/go.mozilla.org/sops/kms/keysource.go b/vendor/go.mozilla.org/sops/kms/keysource.go deleted file mode 100644 index 9a4461795..000000000 --- a/vendor/go.mozilla.org/sops/kms/keysource.go +++ /dev/null @@ -1,279 +0,0 @@ -/* -Package kms contains an implementation of the go.mozilla.org/sops.MasterKey interface that encrypts and decrypts the -data key using AWS KMS with the AWS Go SDK. -*/ -package kms //import "go.mozilla.org/sops/kms" - -import ( - "encoding/base64" - "fmt" - "os" - "regexp" - "strings" - "time" - - "go.mozilla.org/sops/logging" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/aws/aws-sdk-go/service/kms/kmsiface" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/sirupsen/logrus" -) - -var log *logrus.Logger - -func init() { - log = logging.NewLogger("AWSKMS") -} - -// this needs to be a global var for unit tests to work (mockKMS redefines -// it in keysource_test.go) -var kmsSvc kmsiface.KMSAPI -var isMocked bool - -// MasterKey is a AWS KMS key used to encrypt and decrypt sops' data key. 
-type MasterKey struct { - Arn string - Role string - EncryptedKey string - CreationDate time.Time - EncryptionContext map[string]*string - AwsProfile string -} - -// EncryptedDataKey returns the encrypted data key this master key holds -func (key *MasterKey) EncryptedDataKey() []byte { - return []byte(key.EncryptedKey) -} - -// SetEncryptedDataKey sets the encrypted data key for this master key -func (key *MasterKey) SetEncryptedDataKey(enc []byte) { - key.EncryptedKey = string(enc) -} - -// Encrypt takes a sops data key, encrypts it with KMS and stores the result in the EncryptedKey field -func (key *MasterKey) Encrypt(dataKey []byte) error { - // isMocked is set by unit test to indicate that the KMS service - // has already been initialized. it's ugly, but it works. - if kmsSvc == nil || !isMocked { - sess, err := key.createSession() - if err != nil { - log.WithField("arn", key.Arn).Info("Encryption failed") - return fmt.Errorf("Failed to create session: %v", err) - } - kmsSvc = kms.New(sess) - } - out, err := kmsSvc.Encrypt(&kms.EncryptInput{Plaintext: dataKey, KeyId: &key.Arn, EncryptionContext: key.EncryptionContext}) - if err != nil { - log.WithField("arn", key.Arn).Info("Encryption failed") - return fmt.Errorf("Failed to call KMS encryption service: %v", err) - } - key.EncryptedKey = base64.StdEncoding.EncodeToString(out.CiphertextBlob) - log.WithField("arn", key.Arn).Info("Encryption succeeded") - return nil -} - -// EncryptIfNeeded encrypts the provided sops' data key and encrypts it if it hasn't been encrypted yet -func (key *MasterKey) EncryptIfNeeded(dataKey []byte) error { - if key.EncryptedKey == "" { - return key.Encrypt(dataKey) - } - return nil -} - -// Decrypt decrypts the EncryptedKey field with AWS KMS and returns the result. -func (key *MasterKey) Decrypt() ([]byte, error) { - k, err := base64.StdEncoding.DecodeString(key.EncryptedKey) - if err != nil { - log.WithField("arn", key.Arn).Info("Decryption failed") - return nil, fmt.Errorf("Error base64-decoding encrypted data key: %s", err) - } - // isMocked is set by unit test to indicate that the KMS service - // has already been initialized. it's ugly, but it works. - if kmsSvc == nil || !isMocked { - sess, err := key.createSession() - if err != nil { - log.WithField("arn", key.Arn).Info("Decryption failed") - return nil, fmt.Errorf("Error creating AWS session: %v", err) - } - kmsSvc = kms.New(sess) - } - decrypted, err := kmsSvc.Decrypt(&kms.DecryptInput{CiphertextBlob: k, EncryptionContext: key.EncryptionContext}) - if err != nil { - log.WithField("arn", key.Arn).Info("Decryption failed") - return nil, fmt.Errorf("Error decrypting key: %v", err) - } - log.WithField("arn", key.Arn).Info("Decryption succeeded") - return decrypted.Plaintext, nil -} - -// NeedsRotation returns whether the data key needs to be rotated or not. 
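A minimal usage sketch for this kms key source, using the ARN-plus-role form split by NewMasterKeyFromArn below; the ARNs, account ID, profile name and encryption context are placeholders, and real AWS credentials are needed at runtime:

```go
package main

import (
	"fmt"
	"log"

	"go.mozilla.org/sops/kms" // old import path removed by this patch
)

func main() {
	// A "+arn:aws:iam::..." suffix on the key ARN names a role to assume;
	// both ARNs below are placeholders.
	arn := "arn:aws:kms:us-east-1:111122223333:key/example-key-id" +
		"+arn:aws:iam::111122223333:role/example-sops-role"
	// Comma-separated key:value pairs become the KMS encryption context.
	context := kms.ParseKMSContext("Environment:example,Team:example")
	key := kms.NewMasterKeyFromArn(arn, context, "default")
	if err := key.Encrypt([]byte("example data key")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.ToString())
}
```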
-func (key *MasterKey) NeedsRotation() bool { - return time.Since(key.CreationDate) > (time.Hour * 24 * 30 * 6) -} - -// ToString converts the key to a string representation -func (key *MasterKey) ToString() string { - return key.Arn -} - -// NewMasterKey creates a new MasterKey from an ARN, role and context, setting the creation date to the current date -func NewMasterKey(arn string, role string, context map[string]*string) *MasterKey { - return &MasterKey{ - Arn: arn, - Role: role, - EncryptionContext: context, - CreationDate: time.Now().UTC(), - } -} - -// NewMasterKeyFromArn takes an ARN string and returns a new MasterKey for that ARN -func NewMasterKeyFromArn(arn string, context map[string]*string, awsProfile string) *MasterKey { - k := &MasterKey{} - arn = strings.Replace(arn, " ", "", -1) - roleIndex := strings.Index(arn, "+arn:aws:iam::") - if roleIndex > 0 { - k.Arn = arn[:roleIndex] - k.Role = arn[roleIndex+1:] - } else { - k.Arn = arn - } - k.EncryptionContext = context - k.CreationDate = time.Now().UTC() - k.AwsProfile = awsProfile - return k -} - -// MasterKeysFromArnString takes a comma separated list of AWS KMS ARNs and returns a slice of new MasterKeys for those ARNs -func MasterKeysFromArnString(arn string, context map[string]*string, awsProfile string) []*MasterKey { - var keys []*MasterKey - if arn == "" { - return keys - } - for _, s := range strings.Split(arn, ",") { - keys = append(keys, NewMasterKeyFromArn(s, context, awsProfile)) - } - return keys -} - -func (key MasterKey) createStsSession(config aws.Config, sess *session.Session) (*session.Session, error) { - hostname, err := os.Hostname() - if err != nil { - return nil, err - } - stsService := sts.New(sess) - name := "sops@" + hostname - out, err := stsService.AssumeRole(&sts.AssumeRoleInput{ - RoleArn: &key.Role, RoleSessionName: &name}) - if err != nil { - return nil, fmt.Errorf("Failed to assume role %q: %v", key.Role, err) - } - config.Credentials = credentials.NewStaticCredentials(*out.Credentials.AccessKeyId, - *out.Credentials.SecretAccessKey, *out.Credentials.SessionToken) - sess, err = session.NewSession(&config) - if err != nil { - return nil, fmt.Errorf("Failed to create new aws session: %v", err) - } - return sess, nil -} - -func (key MasterKey) createSession() (*session.Session, error) { - re := regexp.MustCompile(`^arn:aws[\w-]*:kms:(.+):[0-9]+:(key|alias)/.+$`) - matches := re.FindStringSubmatch(key.Arn) - if matches == nil { - return nil, fmt.Errorf("No valid ARN found in %q", key.Arn) - } - - config := aws.Config{Region: aws.String(matches[1])} - - if key.AwsProfile != "" { - config.Credentials = credentials.NewSharedCredentials("", key.AwsProfile) - } - - opts := session.Options{ - Config: config, - AssumeRoleTokenProvider: stscreds.StdinTokenProvider, - } - sess, err := session.NewSessionWithOptions(opts) - if err != nil { - return nil, err - } - if key.Role != "" { - return key.createStsSession(config, sess) - } - return sess, nil -} - -// ToMap converts the MasterKey to a map for serialization purposes -func (key MasterKey) ToMap() map[string]interface{} { - out := make(map[string]interface{}) - out["arn"] = key.Arn - if key.Role != "" { - out["role"] = key.Role - } - out["created_at"] = key.CreationDate.UTC().Format(time.RFC3339) - out["enc"] = key.EncryptedKey - if key.EncryptionContext != nil { - outcontext := make(map[string]string) - for k, v := range key.EncryptionContext { - outcontext[k] = *v - } - out["context"] = outcontext - } - return out -} - -// ParseKMSContext takes either a 
KMS context map or a comma-separated list of KMS context key:value pairs and returns a map -func ParseKMSContext(in interface{}) map[string]*string { - nonStringValueWarning := "Encryption context contains a non-string value, context will not be used" - out := make(map[string]*string) - - switch in := in.(type) { - case map[string]interface{}: - if len(in) == 0 { - return nil - } - for k, v := range in { - value, ok := v.(string) - if !ok { - log.Warn(nonStringValueWarning) - return nil - } - out[k] = &value - } - case map[interface{}]interface{}: - if len(in) == 0 { - return nil - } - for k, v := range in { - key, ok := k.(string) - if !ok { - log.Warn(nonStringValueWarning) - return nil - } - value, ok := v.(string) - if !ok { - log.Warn(nonStringValueWarning) - return nil - } - out[key] = &value - } - case string: - if in == "" { - return nil - } - for _, kv := range strings.Split(in, ",") { - kv := strings.Split(kv, ":") - if len(kv) != 2 { - log.Warn(nonStringValueWarning) - return nil - } - out[kv[0]] = &kv[1] - } - } - return out -} diff --git a/vendor/go.mozilla.org/sops/make_download_page.sh b/vendor/go.mozilla.org/sops/make_download_page.sh deleted file mode 100644 index a3b7d23f3..000000000 --- a/vendor/go.mozilla.org/sops/make_download_page.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -[ ! -d dist ] && mkdir dist -echo -e "\nSops download page>\n\n
    Sops download page
    \n
    go.mozilla.org/sops
    \n" > index.html -IFS=$'\n' -for dist in $(aws s3 ls s3://go.mozilla.org/sops/dist/ | grep -P "deb|rpm"); do - ts=$(echo $dist|awk '{print $1,$2}') - size=$(echo $dist|awk '{print $3}') - pkg=$(echo $dist|awk '{print $4}') - echo -e "" >> index.html -done -echo -e "
    $ts$size$pkg
    \n\n" >> index.html diff --git a/vendor/go.mozilla.org/sops/pgp/keysource.go b/vendor/go.mozilla.org/sops/pgp/keysource.go deleted file mode 100644 index 7ebd7da04..000000000 --- a/vendor/go.mozilla.org/sops/pgp/keysource.go +++ /dev/null @@ -1,363 +0,0 @@ -/* -Package pgp contains an implementation of the go.mozilla.org/sops.MasterKey interface that encrypts and decrypts the -data key by first trying with the golang.org/x/crypto/openpgp package and if that fails, by calling the "gpg" binary. -*/ -package pgp //import "go.mozilla.org/sops/pgp" - -import ( - "bytes" - "encoding/hex" - "fmt" - "io/ioutil" - "net/http" - "os" - "os/user" - "path" - "strings" - "time" - - "os/exec" - - "github.com/howeyc/gopass" - "github.com/sirupsen/logrus" - gpgagent "go.mozilla.org/gopgagent" - "go.mozilla.org/sops/logging" - "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/openpgp/armor" -) - -var log *logrus.Logger - -func init() { - log = logging.NewLogger("PGP") -} - -// MasterKey is a PGP key used to securely store sops' data key by encrypting it and decrypting it -type MasterKey struct { - Fingerprint string - EncryptedKey string - CreationDate time.Time -} - -// EncryptedDataKey returns the encrypted data key this master key holds -func (key *MasterKey) EncryptedDataKey() []byte { - return []byte(key.EncryptedKey) -} - -// SetEncryptedDataKey sets the encrypted data key for this master key -func (key *MasterKey) SetEncryptedDataKey(enc []byte) { - key.EncryptedKey = string(enc) -} - -func gpgBinary() string { - binary := "gpg" - if envBinary := os.Getenv("SOPS_GPG_EXEC"); envBinary != "" { - binary = envBinary - } - return binary -} - -func (key *MasterKey) encryptWithGPGBinary(dataKey []byte) error { - fingerprint := key.Fingerprint - if offset := len(fingerprint) - 16; offset > 0 { - fingerprint = fingerprint[offset:] - } - args := []string{ - "--no-default-recipient", - "--yes", - "--encrypt", - "-a", - "-r", - key.Fingerprint, - "--trusted-key", - fingerprint, - "--no-encrypt-to", - } - cmd := exec.Command(gpgBinary(), args...) 
- var stdout, stderr bytes.Buffer - cmd.Stdin = bytes.NewReader(dataKey) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err := cmd.Run() - if err != nil { - return err - } - key.EncryptedKey = stdout.String() - return nil -} - -func getKeyFromKeyServer(keyserver string, fingerprint string) (openpgp.Entity, error) { - url := fmt.Sprintf("https://%s/pks/lookup?op=get&options=mr&search=0x%s", keyserver, fingerprint) - resp, err := http.Get(url) - if err != nil { - return openpgp.Entity{}, fmt.Errorf("error getting key from keyserver: %s", err) - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return openpgp.Entity{}, fmt.Errorf("keyserver returned non-200 status code %s", resp.Status) - } - ents, err := openpgp.ReadArmoredKeyRing(resp.Body) - if err != nil { - return openpgp.Entity{}, fmt.Errorf("could not read entities: %s", err) - } - return *ents[0], nil -} - -func gpgKeyServer() string { - keyServer := "gpg.mozilla.org" - if envKeyServer := os.Getenv("SOPS_GPG_KEYSERVER"); envKeyServer != "" { - keyServer = envKeyServer - } - return keyServer -} - -func (key *MasterKey) getPubKey() (openpgp.Entity, error) { - ring, err := key.pubRing() - if err == nil { - fingerprints := key.fingerprintMap(ring) - entity, ok := fingerprints[key.Fingerprint] - if ok { - return entity, nil - } - } - keyServer := gpgKeyServer() - entity, err := getKeyFromKeyServer(keyServer, key.Fingerprint) - if err != nil { - return openpgp.Entity{}, - fmt.Errorf("key with fingerprint %s is not available "+ - "in keyring and could not be retrieved from keyserver", key.Fingerprint) - } - return entity, nil -} - -func (key *MasterKey) encryptWithCryptoOpenPGP(dataKey []byte) error { - entity, err := key.getPubKey() - if err != nil { - return err - } - encbuf := new(bytes.Buffer) - armorbuf, err := armor.Encode(encbuf, "PGP MESSAGE", nil) - if err != nil { - return err - } - plaintextbuf, err := openpgp.Encrypt(armorbuf, []*openpgp.Entity{&entity}, nil, &openpgp.FileHints{IsBinary: true}, nil) - if err != nil { - return err - } - _, err = plaintextbuf.Write(dataKey) - if err != nil { - return err - } - err = plaintextbuf.Close() - if err != nil { - return err - } - err = armorbuf.Close() - if err != nil { - return err - } - bytes, err := ioutil.ReadAll(encbuf) - if err != nil { - return err - } - key.EncryptedKey = string(bytes) - return nil -} - -// Encrypt encrypts the data key with the PGP key with the same fingerprint as the MasterKey. It looks for PGP public keys in $PGPHOME/pubring.gpg. 
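A minimal usage sketch for this pgp key source; the fingerprint is the test key from the example files above, and SOPS_GPG_EXEC / SOPS_GPG_KEYSERVER remain optional overrides for the helpers shown earlier:

```go
package main

import (
	"fmt"
	"log"

	"go.mozilla.org/sops/pgp" // old import path removed by this patch
)

func main() {
	// Fingerprint of the public key that will wrap the data key; the value is
	// the test key used by the example files in this package.
	key := pgp.NewMasterKeyFromFingerprint("1022470DE3F0BC54BC6AB62DE05550BC07FB1A0A")
	// Encrypt first tries golang.org/x/crypto/openpgp against the local
	// keyring (falling back to a keyserver lookup), then the gpg binary.
	if err := key.Encrypt([]byte("example data key")); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes of armored ciphertext\n", len(key.EncryptedDataKey()))
}
```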
-func (key *MasterKey) Encrypt(dataKey []byte) error { - openpgpErr := key.encryptWithCryptoOpenPGP(dataKey) - if openpgpErr == nil { - log.WithField("fingerprint", key.Fingerprint).Info("Encryption succeeded") - return nil - } - binaryErr := key.encryptWithGPGBinary(dataKey) - if binaryErr == nil { - log.WithField("fingerprint", key.Fingerprint).Info("Encryption succeeded") - return nil - } - log.WithField("fingerprint", key.Fingerprint).Info("Encryption failed") - return fmt.Errorf( - `could not encrypt data key with PGP key: golang.org/x/crypto/openpgp error: %v; GPG binary error: %v`, - openpgpErr, binaryErr) -} - -// EncryptIfNeeded encrypts the data key with PGP only if it's needed, that is, if it hasn't been encrypted already -func (key *MasterKey) EncryptIfNeeded(dataKey []byte) error { - if key.EncryptedKey == "" { - return key.Encrypt(dataKey) - } - return nil -} - -func (key *MasterKey) decryptWithGPGBinary() ([]byte, error) { - args := []string{ - "--use-agent", - "-d", - } - cmd := exec.Command(gpgBinary(), args...) - var stdout, stderr bytes.Buffer - cmd.Stdin = strings.NewReader(key.EncryptedKey) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err := cmd.Run() - if err != nil { - return nil, err - } - return stdout.Bytes(), nil -} - -func (key *MasterKey) decryptWithCryptoOpenpgp() ([]byte, error) { - ring, err := key.secRing() - if err != nil { - return nil, fmt.Errorf("Could not load secring: %s", err) - } - block, err := armor.Decode(strings.NewReader(key.EncryptedKey)) - if err != nil { - return nil, fmt.Errorf("Armor decoding failed: %s", err) - } - md, err := openpgp.ReadMessage(block.Body, ring, key.passphrasePrompt, nil) - if err != nil { - return nil, fmt.Errorf("Reading PGP message failed: %s", err) - } - if b, err := ioutil.ReadAll(md.UnverifiedBody); err == nil { - return b, nil - } - return nil, fmt.Errorf("The key could not be decrypted with any of the PGP entries") -} - -// Decrypt uses PGP to obtain the data key from the EncryptedKey store in the MasterKey and returns it -func (key *MasterKey) Decrypt() ([]byte, error) { - dataKey, openpgpErr := key.decryptWithCryptoOpenpgp() - if openpgpErr == nil { - log.WithField("fingerprint", key.Fingerprint).Info("Decryption succeeded") - return dataKey, nil - } - dataKey, binaryErr := key.decryptWithGPGBinary() - if binaryErr == nil { - log.WithField("fingerprint", key.Fingerprint).Info("Decryption succeeded") - return dataKey, nil - } - log.WithField("fingerprint", key.Fingerprint).Info("Decryption failed") - return nil, fmt.Errorf( - `could not decrypt data key with PGP key: golang.org/x/crypto/openpgp error: %v; GPG binary error: %v`, - openpgpErr, binaryErr) -} - -// NeedsRotation returns whether the data key needs to be rotated or not -func (key *MasterKey) NeedsRotation() bool { - return time.Since(key.CreationDate).Hours() > 24*30*6 -} - -// ToString returns the string representation of the key, i.e. 
its fingerprint -func (key *MasterKey) ToString() string { - return key.Fingerprint -} - -func (key *MasterKey) gpgHome() string { - dir := os.Getenv("GNUPGHOME") - if dir == "" { - usr, err := user.Current() - if err != nil { - return path.Join(os.Getenv("HOME"), "/.gnupg") - } - return path.Join(usr.HomeDir, ".gnupg") - } - return dir -} - -// NewMasterKeyFromFingerprint takes a PGP fingerprint and returns a new MasterKey with that fingerprint -func NewMasterKeyFromFingerprint(fingerprint string) *MasterKey { - return &MasterKey{ - Fingerprint: strings.Replace(fingerprint, " ", "", -1), - CreationDate: time.Now().UTC(), - } -} - -// MasterKeysFromFingerprintString takes a comma separated list of PGP fingerprints and returns a slice of new MasterKeys with those fingerprints -func MasterKeysFromFingerprintString(fingerprint string) []*MasterKey { - var keys []*MasterKey - if fingerprint == "" { - return keys - } - for _, s := range strings.Split(fingerprint, ",") { - keys = append(keys, NewMasterKeyFromFingerprint(s)) - } - return keys -} - -func (key *MasterKey) loadRing(path string) (openpgp.EntityList, error) { - f, err := os.Open(path) - if err != nil { - return openpgp.EntityList{}, err - } - defer f.Close() - keyring, err := openpgp.ReadKeyRing(f) - if err != nil { - return keyring, err - } - return keyring, nil -} - -func (key *MasterKey) secRing() (openpgp.EntityList, error) { - return key.loadRing(key.gpgHome() + "/secring.gpg") -} - -func (key *MasterKey) pubRing() (openpgp.EntityList, error) { - return key.loadRing(key.gpgHome() + "/pubring.gpg") -} - -func (key *MasterKey) fingerprintMap(ring openpgp.EntityList) map[string]openpgp.Entity { - fps := make(map[string]openpgp.Entity) - for _, entity := range ring { - fp := strings.ToUpper(hex.EncodeToString(entity.PrimaryKey.Fingerprint[:])) - if entity != nil { - fps[fp] = *entity - } - } - return fps -} - -func (key *MasterKey) passphrasePrompt(keys []openpgp.Key, symmetric bool) ([]byte, error) { - conn, err := gpgagent.NewConn() - if err == gpgagent.ErrNoAgent { - log.Infof("gpg-agent not found, continuing with manual passphrase " + - "input...") - fmt.Print("Enter PGP key passphrase: ") - pass, err := gopass.GetPasswd() - if err != nil { - return nil, err - } - for _, k := range keys { - k.PrivateKey.Decrypt(pass) - } - return pass, err - } - if err != nil { - return nil, fmt.Errorf("Could not establish connection with gpg-agent: %s", err) - } - defer conn.Close() - for _, k := range keys { - req := gpgagent.PassphraseRequest{ - CacheKey: k.PublicKey.KeyIdShortString(), - Prompt: "Passphrase", - Desc: fmt.Sprintf("Unlock key %s to decrypt sops's key", k.PublicKey.KeyIdShortString()), - } - pass, err := conn.GetPassphrase(&req) - if err != nil { - return nil, fmt.Errorf("gpg-agent passphrase request errored: %s", err) - } - k.PrivateKey.Decrypt([]byte(pass)) - return []byte(pass), nil - } - return nil, fmt.Errorf("No key to unlock") -} - -// ToMap converts the MasterKey into a map for serialization purposes -func (key MasterKey) ToMap() map[string]interface{} { - out := make(map[string]interface{}) - out["fp"] = key.Fingerprint - out["created_at"] = key.CreationDate.UTC().Format(time.RFC3339) - out["enc"] = key.EncryptedKey - return out -} diff --git a/vendor/go.mozilla.org/sops/stores/yaml/store.go b/vendor/go.mozilla.org/sops/stores/yaml/store.go deleted file mode 100644 index c897f537e..000000000 --- a/vendor/go.mozilla.org/sops/stores/yaml/store.go +++ /dev/null @@ -1,207 +0,0 @@ -package yaml //import 
"go.mozilla.org/sops/stores/yaml" - -import ( - "fmt" - - "github.com/mozilla-services/yaml" - "go.mozilla.org/sops" - "go.mozilla.org/sops/stores" -) - -// Store handles storage of YAML data -type Store struct { -} - -func (store Store) mapSliceToTreeBranch(in yaml.MapSlice) sops.TreeBranch { - branch := make(sops.TreeBranch, 0) - for _, item := range in { - if comment, ok := item.Key.(yaml.Comment); ok { - // Convert the yaml comment to a generic sops comment - branch = append(branch, sops.TreeItem{ - Key: sops.Comment{ - Value: comment.Value, - }, - Value: nil, - }) - } else { - branch = append(branch, sops.TreeItem{ - Key: item.Key, - Value: store.yamlValueToTreeValue(item.Value), - }) - } - } - return branch -} - -func (store Store) yamlValueToTreeValue(in interface{}) interface{} { - switch in := in.(type) { - case map[interface{}]interface{}: - return store.yamlMapToTreeBranch(in) - case yaml.MapSlice: - return store.mapSliceToTreeBranch(in) - case []interface{}: - return store.yamlSliceToTreeValue(in) - case yaml.Comment: - return sops.Comment{Value: in.Value} - default: - return in - } -} - -func (store *Store) yamlSliceToTreeValue(in []interface{}) []interface{} { - for i, v := range in { - in[i] = store.yamlValueToTreeValue(v) - } - return in -} - -func (store *Store) yamlMapToTreeBranch(in map[interface{}]interface{}) sops.TreeBranch { - branch := make(sops.TreeBranch, 0) - for k, v := range in { - branch = append(branch, sops.TreeItem{ - Key: k.(string), - Value: store.yamlValueToTreeValue(v), - }) - } - return branch -} - -func (store Store) treeValueToYamlValue(in interface{}) interface{} { - switch in := in.(type) { - case sops.TreeBranch: - return store.treeBranchToYamlMap(in) - case sops.Comment: - return yaml.Comment{in.Value} - case []interface{}: - var out []interface{} - for _, v := range in { - out = append(out, store.treeValueToYamlValue(v)) - } - return out - default: - return in - } -} - -func (store Store) treeBranchToYamlMap(in sops.TreeBranch) yaml.MapSlice { - branch := make(yaml.MapSlice, 0) - for _, item := range in { - if comment, ok := item.Key.(sops.Comment); ok { - branch = append(branch, yaml.MapItem{ - Key: store.treeValueToYamlValue(comment), - Value: nil, - }) - } else { - branch = append(branch, yaml.MapItem{ - Key: item.Key, - Value: store.treeValueToYamlValue(item.Value), - }) - } - } - return branch -} - -// LoadEncryptedFile loads the contents of an encrypted yaml file onto a -// sops.Tree runtime object -func (store *Store) LoadEncryptedFile(in []byte) (sops.Tree, error) { - var data []yaml.MapSlice - if err := (yaml.CommentUnmarshaler{}).UnmarshalDocuments(in, &data); err != nil { - return sops.Tree{}, fmt.Errorf("Error unmarshaling input YAML: %s", err) - } - // Because we don't know what fields the input file will have, we have to - // load the file in two steps. - // First, we load the file's metadata, the structure of which is known. - metadataHolder := stores.SopsFile{} - err := yaml.Unmarshal(in, &metadataHolder) - if err != nil { - return sops.Tree{}, fmt.Errorf("Error unmarshalling input yaml: %s", err) - } - if metadataHolder.Metadata == nil { - return sops.Tree{}, sops.MetadataNotFound - } - metadata, err := metadataHolder.Metadata.ToInternal() - if err != nil { - return sops.Tree{}, err - } - var branches sops.TreeBranches - for _, doc := range data { - for i, item := range doc { - if item.Key == "sops" { // Erase - doc = append(doc[:i], doc[i+1:]...) 
- } - } - branches = append(branches, store.mapSliceToTreeBranch(doc)) - } - return sops.Tree{ - Branches: branches, - Metadata: metadata, - }, nil -} - -// LoadPlainFile loads the contents of a plaintext yaml file onto a -// sops.Tree runtime obejct -func (store *Store) LoadPlainFile(in []byte) (sops.TreeBranches, error) { - var data []yaml.MapSlice - if err := (yaml.CommentUnmarshaler{}).UnmarshalDocuments(in, &data); err != nil { - return nil, fmt.Errorf("Error unmarshaling input YAML: %s", err) - } - - var branches sops.TreeBranches - for _, doc := range data { - branches = append(branches, store.mapSliceToTreeBranch(doc)) - } - return branches, nil -} - -// EmitEncryptedFile returns the encrypted bytes of the yaml file corresponding to a -// sops.Tree runtime object -func (store *Store) EmitEncryptedFile(in sops.Tree) ([]byte, error) { - out := []byte{} - for i, branch := range in.Branches { - if i > 0 { - out = append(out, "---\n"...) - } - yamlMap := store.treeBranchToYamlMap(branch) - yamlMap = append(yamlMap, yaml.MapItem{Key: "sops", Value: stores.MetadataFromInternal(in.Metadata)}) - tout, err := (&yaml.YAMLMarshaler{Indent: 4}).Marshal(yamlMap) - if err != nil { - return nil, fmt.Errorf("Error marshaling to yaml: %s", err) - } - out = append(out, tout...) - } - return out, nil -} - -// EmitPlainFile returns the plaintext bytes of the yaml file corresponding to a -// sops.TreeBranches runtime object -func (store *Store) EmitPlainFile(branches sops.TreeBranches) ([]byte, error) { - var out []byte - for i, branch := range branches { - if i > 0 { - out = append(out, "---\n"...) - } - yamlMap := store.treeBranchToYamlMap(branch) - tmpout, err := (&yaml.YAMLMarshaler{Indent: 4}).Marshal(yamlMap) - if err != nil { - return nil, fmt.Errorf("Error marshaling to yaml: %s", err) - } - out = append(out[:], tmpout[:]...) - } - return out, nil -} - -// EmitValue returns bytes corresponding to a single encoded value -// in a generic interface{} object -func (store *Store) EmitValue(v interface{}) ([]byte, error) { - v = store.treeValueToYamlValue(v) - return (&yaml.YAMLMarshaler{Indent: 4}).Marshal(v) -} - -// EmitExample returns the bytes corresponding to an example complex tree -func (store *Store) EmitExample() []byte { - bytes, err := store.EmitPlainFile(stores.ExampleComplexTree.Branches) - if err != nil { - panic(err) - } - return bytes -} diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml deleted file mode 100644 index bd6b66ee8..000000000 --- a/vendor/go.opencensus.io/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go_import_path: go.opencensus.io - -go: - - 1.11.x - -env: - global: - GO111MODULE=on - -before_script: - - make install-tools - -script: - - make travis-ci - - go run internal/check/version.go # TODO move this to makefile diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock deleted file mode 100644 index 3be12ac8f..000000000 --- a/vendor/go.opencensus.io/Gopkg.lock +++ /dev/null @@ -1,231 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" - name = "git.apache.org/thrift.git" - packages = ["lib/go/thrift"] - pruneopts = "UT" - revision = "6e67faa92827ece022380b211c2caaadd6145bf5" - source = "github.com/apache/thrift" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" - name = "github.com/openzipkin/zipkin-go" - packages = [ - ".", - "idgenerator", - "model", - "propagation", - "reporter", - "reporter/http", - ] - pruneopts = "UT" - revision = "d455a5674050831c1e187644faa4046d653433c2" - version = "v0.1.1" - -[[projects]] - digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - branch = "master" - digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" - -[[projects]] - digest = 
"1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "e21acd801f91da814261b938941d193bb036441a" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" - -[[projects]] - digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "git.apache.org/thrift.git/lib/go/thrift", - "github.com/golang/protobuf/proto", - "github.com/openzipkin/zipkin-go", - "github.com/openzipkin/zipkin-go/model", - "github.com/openzipkin/zipkin-go/reporter", - "github.com/openzipkin/zipkin-go/reporter/http", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "golang.org/x/net/context", - "golang.org/x/net/http2", - "google.golang.org/api/support/bundler", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/grpclog", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/stats", - "google.golang.org/grpc/status", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml deleted file mode 100644 index a9f3cd68e..000000000 --- a/vendor/go.opencensus.io/Gopkg.toml +++ /dev/null @@ -1,36 +0,0 @@ -# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" -# to avoid locking to a particular minor version which can cause dep to not be -# able to find a satisfying dependency graph. 
- -[[constraint]] - branch = "master" - name = "git.apache.org/thrift.git" - source = "github.com/apache/thrift" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.0.0" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go" - version = ">=0.1.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = ">=0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.11.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index 457866cb1..d896edc99 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -8,7 +8,7 @@ ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test -GOFMT=gofmt +GOIMPORTS=goimports GOLINT=golint GOVET=go vet EMBEDMD=embedmd @@ -17,14 +17,14 @@ TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packag TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test +.DEFAULT_GOAL := imports-lint-vet-embedmd-test -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test # TODO enable test-with-coverage in tavis .PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 +travis-ci: imports lint vet embedmd test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort @@ -44,15 +44,15 @@ test-386: test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ exit 1; \ else \ - echo "Fmt finished successfully"; \ + echo "Imports finished successfully"; \ fi .PHONY: lint @@ -91,6 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/lint/golint - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index a8cd09eaf..1d7e83711 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -9,6 +9,8 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. Currently it consists of three major components: tags, stats and tracing. +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. 
OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). + ## Installation ``` @@ -57,6 +59,7 @@ can implement their own exporters by implementing the exporter interfaces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats * [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces ## Overview @@ -261,3 +264,4 @@ release in which the functionality was marked *Deprecated*. [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite [exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml index 12bd7c4c7..d08f0edaf 100644 --- a/vendor/go.opencensus.io/appveyor.yml +++ b/vendor/go.opencensus.io/appveyor.yml @@ -6,13 +6,12 @@ clone_folder: c:\gopath\src\go.opencensus.io environment: GOPATH: 'c:\gopath' - GOVERSION: '1.11' GO111MODULE: 'on' CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 +stack: go 1.11 + +before_test: - go version - go env diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368..11e31f421 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go new file mode 100644 index 000000000..2063b6f76 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -0,0 +1,56 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "context" + + "go.opencensus.io/trace" + "google.golang.org/grpc/stats" +) + +// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and +// traces. Use with gRPC clients only. +type ClientHandler struct { + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions +} + +// HandleConn exists to satisfy gRPC stats.Handler. 
+func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go new file mode 100644 index 000000000..fb3c19d6b --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -0,0 +1,118 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ClientHandler: +var ( + ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) + ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) +) + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. 
+var ( + ClientSentBytesPerRPCView = &view.View{ + Measure: ClientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of bytes sent per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientReceivedBytesPerRPCView = &view.View{ + Measure: ClientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of bytes received per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientRoundtripLatencyView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } + + // Purposely reuses the count from `ClientRoundtripLatency`, tagging + // with method and status to result in ClientCompletedRpcs. + ClientCompletedRPCsView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + Aggregation: view.Count(), + } + + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + + ClientSentMessagesPerRPCView = &view.View{ + Measure: ClientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientReceivedMessagesPerRPCView = &view.View{ + Measure: ClientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientServerLatencyView = &view.View{ + Measure: ClientServerLatency, + Name: "grpc.io/client/server_latency", + Description: "Distribution of server latency as viewed by client, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } +) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go new file mode 100644 index 000000000..b36349820 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -0,0 +1,49 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "context" + "time" + + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the tag.Map populated by the application code, serializes +// its tags into the GRPC metadata in order to be sent to the server. +func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Info("clientHandler.TagRPC called with nil info.") + } + return ctx + } + + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ts := tag.FromContext(ctx) + if ts != nil { + encoded := tag.Encode(ts) + ctx = stats.SetTags(ctx, encoded) + } + + return context.WithValue(ctx, rpcDataKey, d) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go similarity index 59% rename from vendor/github.com/Azure/azure-sdk-for-go/version/version.go rename to vendor/go.opencensus.io/plugin/ocgrpc/doc.go index b71392930..1370323fb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go @@ -1,21 +1,19 @@ -package version - -// Copyright (c) Microsoft and contributors. All rights reserved. +// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 +// +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// // See the License for the specific language governing permissions and // limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// Number contains the semantic version of this SDK. -const Number = "v32.6.0" +// Package ocgrpc contains OpenCensus stats and trace +// integrations for gRPC. +// +// Use ServerHandler for servers and ClientHandler for clients. +package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go new file mode 100644 index 000000000..8a53e0972 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -0,0 +1,81 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "context" + + "google.golang.org/grpc/stats" + + "go.opencensus.io/trace" +) + +// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and +// traces. Use with gRPC servers. +// +// When installed (see Example), tracing metadata is read from inbound RPCs +// by default. If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). +type ServerHandler struct { + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool + + // StartOptions to use for to spans started around RPCs handled by this server. + // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions +} + +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go new file mode 100644 index 000000000..fe0e97108 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ServerHandler: +var ( + ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) + ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. +var ( + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: ServerReceivedBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of total sent bytes per RPC, by method.", + Measure: ServerSentBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerLatency, + Aggregation: DefaultMillisecondsDistribution, + } + + // Purposely reuses the count from `ServerLatency`, tagging + // with method and status to result in ServerCompletedRpcs. 
+ ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, + Measure: ServerLatency, + Aggregation: view.Count(), + } + + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of messages received count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerReceivedMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } + + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of messages sent count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerSentMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerReceivedBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerLatencyView, + ServerCompletedRPCsView, +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go new file mode 100644 index 000000000..afcef023a --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -0,0 +1,63 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "time" + + "context" + + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from +// it and creates a new tag.Map and puts them into the returned context. +func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Infof("opencensus: TagRPC called with nil info.") + } + return ctx + } + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + propagated := h.extractPropagatedTags(ctx) + ctx = tag.NewContext(ctx, propagated) + ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) + return context.WithValue(ctx, rpcDataKey, d) +} + +// extractPropagatedTags creates a new tag map containing the tags extracted from the +// gRPC metadata. 
+func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { + buf := stats.Tags(ctx) + if buf == nil { + return nil + } + propagated, err := tag.Decode(buf) + if err != nil { + if grpclog.V(2) { + grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) + } + return nil + } + return propagated +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go new file mode 100644 index 000000000..9cb27320c --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -0,0 +1,248 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "context" + "strconv" + "strings" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +type grpcInstrumentationKey string + +// rpcData holds the instrumentation RPC data that is needed between the start +// and end of an call. It holds the info that this package needs to keep track +// of between the various GRPC events. +type rpcData struct { + // reqCount and respCount has to be the first words + // in order to be 64-aligned on 32-bit architectures. + sentCount, sentBytes, recvCount, recvBytes int64 // access atomically + + // startTime represents the time at which TagRPC was invoked at the + // beginning of an RPC. It is an appoximation of the time when the + // application code invoked GRPC code. + startTime time.Time + method string +} + +// The following variables define the default hard-coded auxiliary data used by +// both the default GRPC client and GRPC server metrics. +var ( + DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) +) + +// Server tags are applied to the context used to process each RPC, as well as +// the measures at the end of each RPC. +var ( + KeyServerMethod = tag.MustNewKey("grpc_server_method") + KeyServerStatus = tag.MustNewKey("grpc_server_status") +) + +// Client tags are applied to measures at the end of each RPC. 
+var ( + KeyClientMethod = tag.MustNewKey("grpc_client_method") + KeyClientStatus = tag.MustNewKey("grpc_client_status") +) + +var ( + rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") +) + +func methodName(fullname string) string { + return strings.TrimLeft(fullname, "/") +} + +// statsHandleRPC processes the RPC events. +func statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) + case *stats.OutPayload: + handleRPCOutPayload(ctx, st) + case *stats.InPayload: + handleRPCInPayload(ctx, st) + case *stats.End: + handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + +func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.sentBytes, int64(s.Length)) + atomic.AddInt64(&d.sentCount, 1) +} + +func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.recvBytes, int64(s.Length)) + atomic.AddInt64(&d.recvCount, 1) +} + +func handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + elapsedTime := time.Since(d.startTime) + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = statusCodeToString(s) + } + } else { + st = "OK" + } + + latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + attachments := getSpanCtxAttachment(ctx) + if s.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st)), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyServerStatus, st), + ), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis))) + } +} + +func statusCodeToString(s *status.Status) string { + // see 
https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + switch c := s.Code(); c { + case codes.OK: + return "OK" + case codes.Canceled: + return "CANCELLED" + case codes.Unknown: + return "UNKNOWN" + case codes.InvalidArgument: + return "INVALID_ARGUMENT" + case codes.DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case codes.NotFound: + return "NOT_FOUND" + case codes.AlreadyExists: + return "ALREADY_EXISTS" + case codes.PermissionDenied: + return "PERMISSION_DENIED" + case codes.ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case codes.FailedPrecondition: + return "FAILED_PRECONDITION" + case codes.Aborted: + return "ABORTED" + case codes.OutOfRange: + return "OUT_OF_RANGE" + case codes.Unimplemented: + return "UNIMPLEMENTED" + case codes.Internal: + return "INTERNAL" + case codes.Unavailable: + return "UNAVAILABLE" + case codes.DataLoss: + return "DATA_LOSS" + case codes.Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE_" + strconv.FormatInt(int64(c), 10) + } +} + +func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { + attachments := map[string]interface{}{} + span := trace.FromContext(ctx) + if span == nil { + return attachments + } + spanCtx := span.SpanContext() + if spanCtx.IsSampled() { + attachments[metricdata.AttachmentKeySpanContext] = spanCtx + } + return attachments +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go new file mode 100644 index 000000000..61bc543d0 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "context" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const traceContextKey = "grpc-trace-bin" + +// TagRPC creates a new trace span for the client side of the RPC. +// +// It returns ctx with the new trace span added and a serialization of the +// SpanContext added to the outgoing gRPC metadata. +func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + ctx, span := trace.StartSpan(ctx, name, + trace.WithSampler(c.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC + traceContextBinary := propagation.Binary(span.SpanContext()) + return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) +} + +// TagRPC creates a new trace span for the server side of the RPC. +// +// It checks the incoming gRPC metadata in ctx for a SpanContext, and if +// it finds one, uses that SpanContext as the parent context of the new span. +// +// It returns ctx, with the new trace span added. 
+func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + md, _ := metadata.FromIncomingContext(ctx) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler), + ) + return ctx + } + } + ctx, span := trace.StartSpan(ctx, name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler)) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return ctx +} + +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.FromContext(ctx) + // TODO: compressed and uncompressed sizes are not populated in every message. + switch rs := rs.(type) { + case *stats.Begin: + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) + case *stats.InPayload: + span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } + } + span.End() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index 2f1c7f006..9ad885219 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -68,7 +68,7 @@ func ParseTraceID(tid string) (trace.TraceID, bool) { return trace.TraceID{}, false } b, err := hex.DecodeString(tid) - if err != nil { + if err != nil || len(b) > 16 { return trace.TraceID{}, false } var traceID trace.TraceID @@ -90,7 +90,7 @@ func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { return trace.SpanID{}, false } b, err := hex.DecodeString(sid) - if err != nil { + if err != nil || len(b) > 8 { return trace.SpanID{}, false } start := 8 - len(b) diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index dc6563a15..f7c8434be 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. 
// -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. type Handler struct { @@ -70,6 +70,12 @@ type Handler struct { // from the information found in the incoming HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string + + // IsHealthEndpoint holds the function to use for determining if the + // incoming HTTP request should be considered a health check. This is in + // addition to the private isHealthEndpoint func which may also indicate + // tracing should be skipped. + IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -87,7 +93,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if isHealthEndpoint(r.URL.Path) { + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { return r, func() {} } var name string @@ -218,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 53e71305a..ed3a5db56 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -204,7 +204,10 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee0..31477a464 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. 
@@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629..436dc791f 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index ad4691184..8b5b99803 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -31,10 +31,19 @@ func init() { } } +// Recorder provides an interface for exporting measurement information from +// the static Record method by using the WithRecorder option. +type Recorder interface { + // Record records a set of measurements associated with the given tags and attachments. + // The second argument is a `[]Measurement`. + Record(*tag.Map, interface{}, map[string]interface{}) +} + type recordOptions struct { attachments metricdata.Attachments mutators []tag.Mutator measurements []Measurement + recorder Recorder } // WithAttachments applies provided exemplar attachments. @@ -58,6 +67,14 @@ func WithMeasurements(measurements ...Measurement) Options { } } +// WithRecorder records the measurements to the specified `Recorder`, rather +// than to the global metrics recorder. +func WithRecorder(meter Recorder) Options { + return func(ro *recordOptions) { + ro.recorder = meter + } +} + // Options apply changes to recordOptions. type Options func(*recordOptions) @@ -69,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. 
@@ -93,6 +129,9 @@ func RecordWithOptions(ctx context.Context, ros ...Options) error { return nil } recorder := internal.DefaultRecorder + if o.recorder != nil { + recorder = o.recorder.Record + } if recorder == nil { return nil } diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go index 6931a5f29..736399652 100644 --- a/vendor/go.opencensus.io/stats/units.go +++ b/vendor/go.opencensus.io/stats/units.go @@ -22,4 +22,5 @@ const ( UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" + UnitSeconds = "s" ) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 8bd25314e..61f72d20d 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -15,6 +15,8 @@ package view +import "time" + // AggType represents the type of aggregation function used on a View. type AggType int @@ -45,20 +47,20 @@ type Aggregation struct { Type AggType // Type is the AggType of this Aggregation. Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - newData func() AggregationData + newData func(time.Time) AggregationData } var ( aggCount = &Aggregation{ Type: AggTypeCount, - newData: func() AggregationData { - return &CountData{} + newData: func(t time.Time) AggregationData { + return &CountData{Start: t} }, } aggSum = &Aggregation{ Type: AggTypeSum, - newData: func() AggregationData { - return &SumData{} + newData: func(t time.Time) AggregationData { + return &SumData{Start: t} }, } ) @@ -88,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries @@ -99,13 +101,14 @@ func Sum() *Aggregation { // If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. 
func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ + agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, } + agg.newData = func(t time.Time) AggregationData { + return newDistributionData(agg, t) + } + return agg } // LastValue only reports the last value recorded using this @@ -113,7 +116,7 @@ func Distribution(bounds ...float64) *Aggregation { func LastValue() *Aggregation { return &Aggregation{ Type: AggTypeLastValue, - newData: func() AggregationData { + newData: func(_ time.Time) AggregationData { return &LastValueData{} }, } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index d500e67f7..d93b52066 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -31,6 +31,7 @@ type AggregationData interface { clone() AggregationData equal(other AggregationData) bool toPoint(t metricdata.Type, time time.Time) metricdata.Point + StartTime() time.Time } const epsilon = 1e-9 @@ -40,6 +41,7 @@ const epsilon = 1e-9 // // Most users won't directly access count data. type CountData struct { + Start time.Time Value int64 } @@ -50,7 +52,7 @@ func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) } func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value} + return &CountData{Value: a.Value, Start: a.Start} } func (a *CountData) equal(other AggregationData) bool { @@ -59,7 +61,7 @@ func (a *CountData) equal(other AggregationData) bool { return false } - return a.Value == a2.Value + return a.Start.Equal(a2.Start) && a.Value == a2.Value } func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -71,11 +73,17 @@ func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata. } } +// StartTime returns the start time of the data being aggregated by CountData. +func (a *CountData) StartTime() time.Time { + return a.Start +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // // Most users won't directly access sum data. type SumData struct { + Start time.Time Value float64 } @@ -86,7 +94,7 @@ func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { } func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value} + return &SumData{Value: a.Value, Start: a.Start} } func (a *SumData) equal(other AggregationData) bool { @@ -94,7 +102,7 @@ func (a *SumData) equal(other AggregationData) bool { if !ok { return false } - return math.Pow(a.Value-a2.Value, 2) < epsilon + return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon } func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -108,6 +116,11 @@ func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Po } } +// StartTime returns the start time of the data being aggregated by SumData. +func (a *SumData) StartTime() time.Time { + return a.Start +} + // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -126,16 +139,18 @@ type DistributionData struct { // an exemplar for the associated bucket, or nil. 
ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values + Start time.Time } -func newDistributionData(bounds []float64) *DistributionData { - bucketCount := len(bounds) + 1 +func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { + bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: bounds, + bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, + Start: t, } } @@ -226,7 +241,11 @@ func (a *DistributionData) equal(other AggregationData) bool { return false } } - return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon + return a.Start.Equal(a2.Start) && + a.Count == a2.Count && + a.Min == a2.Min && + a.Max == a2.Max && + math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -256,6 +275,11 @@ func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metr } } +// StartTime returns the start time of the data being aggregated by DistributionData. +func (a *DistributionData) StartTime() time.Time { + return a.Start +} + // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -291,3 +315,22 @@ func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricd panic("unsupported metricdata.Type") } } + +// StartTime returns an empty time value as start time is not recorded when using last value +// aggregation. +func (l *LastValueData) StartTime() time.Time { + return time.Time{} +} + +// ClearStart clears the Start field from data if present. Useful for testing in cases where the +// start time will be nondeterministic. +func ClearStart(data AggregationData) { + switch data := data.(type) { + case *CountData: + data.Start = time.Time{} + case *SumData: + data.Start = time.Time{} + case *DistributionData: + data.Start = time.Time{} + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 8a6a2c0fd..bcd6e08c7 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -35,7 +35,7 @@ type collector struct { func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { - aggregator = c.a.newData() + aggregator = c.a.newData(t) c.signatures[s] = aggregator } aggregator.addSample(v, attachments, t) @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. 
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1f..60bf0e392 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index 7cb59718f..73ba11f5b 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -14,13 +14,6 @@ package view -import "sync" - -var ( - exportersMu sync.RWMutex // guards exporters - exporters = make(map[Exporter]struct{}) -) - // Exporter exports the collected records as view data. // // The ExportView method should return quickly; if an @@ -43,16 +36,10 @@ type Exporter interface { // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - exporters[e] = struct{}{} + defaultWorker.RegisterExporter(e) } // UnregisterExporter unregisters an exporter. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - delete(exporters, e) + defaultWorker.UnregisterExporter(e) } diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go index 293c1646d..57d615ec7 100644 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -18,6 +18,8 @@ package view import ( "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" ) @@ -117,20 +119,15 @@ func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.La return labelValues } -func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { +func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { return &metricdata.TimeSeries{ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: startTime, + StartTime: row.Data.StartTime(), } } -func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } - +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { rows := v.collectedRows() if len(rows) == 0 { return nil @@ -138,12 +135,13 @@ func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricda ts := []*metricdata.TimeSeries{} for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now, startTime)) + ts = append(ts, rowToTimeseries(v, row, now)) } m := 
&metricdata.Metric{ Descriptor: *v.metricDescriptor, TimeSeries: ts, + Resource: r, } return m } diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2f3c018af..6a79cd8a3 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" @@ -28,9 +30,10 @@ import ( ) func init() { - defaultWorker = newWorker() + defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -39,16 +42,77 @@ type measureRef struct { } type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - startTimes map[*viewInternal]time.Time + measures map[string]*measureRef + views map[string]*viewInternal + viewStartTimes map[*viewInternal]time.Time timer *time.Ticker c chan command quit, done chan bool mu sync.RWMutex + r *resource.Resource + + exportersMu sync.RWMutex + exporters map[Exporter]struct{} +} + +// Meter defines an interface which allows a single process to maintain +// multiple sets of metrics exports (intended for the advanced case where a +// single process wants to report metrics about multiple objects, such as +// multiple databases or HTTP services). +// +// Note that this is an advanced use case, and the static functions in this +// module should cover the common use cases. +type Meter interface { + stats.Recorder + // Find returns a registered view associated with this name. + // If no registered view is found, nil is returned. + Find(name string) *View + // Register begins collecting data for the given views. + // Once a view is registered, it reports data to the registered exporters. + Register(views ...*View) error + // Unregister the given views. Data will not longer be exported for these views + // after Unregister returns. + // It is not necessary to unregister from views you expect to collect for the + // duration of your program execution. + Unregister(views ...*View) + // SetReportingPeriod sets the interval between reporting aggregated views in + // the program. If duration is less than or equal to zero, it enables the + // default behavior. + // + // Note: each exporter makes different promises about what the lowest supported + // duration is. For example, the Stackdriver exporter recommends a value no + // lower than 1 minute. Consult each exporter per your needs. + SetReportingPeriod(time.Duration) + + // RegisterExporter registers an exporter. + // Collected data will be reported via all the + // registered exporters. Once you no longer + // want data to be exported, invoke UnregisterExporter + // with the previously registered exporter. + // + // Binaries can register exporters, libraries shouldn't register exporters. + RegisterExporter(Exporter) + // UnregisterExporter unregisters an exporter. + UnregisterExporter(Exporter) + // SetResource may be used to set the Resource associated with this registry. + // This is intended to be used in cases where a single process exports metrics + // for multiple Resources, typically in a multi-tenant situation. + SetResource(*resource.Resource) + + // Start causes the Meter to start processing Record calls and aggregating + // statistics as well as exporting data. 
+ Start() + // Stop causes the Meter to stop processing calls and terminate data export. + Stop() + + // RetrieveData gets a snapshot of the data collected for the the view registered + // with the given name. It is intended for testing only. + RetrieveData(viewName string) ([]*Row, error) } +var _ Meter = (*worker)(nil) + var defaultWorker *worker var defaultReportingDuration = 10 * time.Second @@ -56,11 +120,17 @@ var defaultReportingDuration = 10 * time.Second // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. func Find(name string) (v *View) { + return defaultWorker.Find(name) +} + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func (w *worker) Find(name string) (v *View) { req := &getViewByNameReq{ name: name, c: make(chan *getViewByNameResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.v } @@ -68,11 +138,17 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { + return defaultWorker.Register(views...) +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func (w *worker) Register(views ...*View) error { req := ®isterViewReq{ views: views, err: make(chan error), } - defaultWorker.c <- req + w.c <- req return <-req.err } @@ -81,6 +157,14 @@ func Register(views ...*View) error { // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func Unregister(views ...*View) { + defaultWorker.Unregister(views...) +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func (w *worker) Unregister(views ...*View) { names := make([]string, len(views)) for i := range views { names[i] = views[i].Name @@ -89,31 +173,52 @@ func Unregister(views ...*View) { views: names, done: make(chan struct{}), } - defaultWorker.c <- req + w.c <- req <-req.done } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func RetrieveData(viewName string) ([]*Row, error) { + return defaultWorker.RetrieveData(viewName) +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func (w *worker) RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), v: viewName, c: make(chan *retrieveDataResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.rows, resp.err } func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + defaultWorker.Record(tags, ms, attachments) +} + +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + +// Record records a set of measurements ms associated with the given tags and attachments. 
+func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } - defaultWorker.c <- req + w.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in @@ -124,28 +229,61 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { + defaultWorker.SetReportingPeriod(d) +} + +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func (w *worker) SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s req := &setReportingPeriodReq{ d: d, c: make(chan bool), } - defaultWorker.c <- req + w.c <- req <-req.c // don't return until the timer is set to the new duration. } -func newWorker() *worker { +// NewMeter constructs a Meter instance. You should only need to use this if +// you need to separate out Measurement recordings and View aggregations within +// a single process. +func NewMeter() Meter { return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - startTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + viewStartTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + + exporters: make(map[Exporter]struct{}), } } +// SetResource associates all data collected by this Meter with the specified +// resource. This resource is reported when using metricexport.ReadAndExport; +// it is not provided when used with ExportView/RegisterExporter, because that +// interface does not provide a means for reporting the Resource. 
+func (w *worker) SetResource(r *resource.Resource) { + w.r = r +} + +func (w *worker) Start() { + go w.start() +} + func (w *worker) start() { prodMgr := metricproducer.GlobalManager() prodMgr.AddProducer(w) @@ -155,21 +293,24 @@ func (w *worker) start() { case cmd := <-w.c: cmd.handleCommand(w) case <-w.timer.C: - w.reportUsage(time.Now()) + w.reportUsage() case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } } -func (w *worker) stop() { +func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } @@ -202,44 +343,45 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi + w.viewStartTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil } -func (w *worker) unregisterView(viewName string) { +func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() - delete(w.views, viewName) + delete(w.views, v.view.Name) + delete(w.viewStartTimes, v) + if measure := w.measures[v.view.Measure.Name()]; measure != nil { + delete(measure.views, v) + } } -func (w *worker) reportView(v *viewInternal, now time.Time) { +func (w *worker) reportView(v *viewInternal) { if !v.isSubscribed() { return } rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } viewData := &Data{ View: v.view, - Start: w.startTimes[v], + Start: w.viewStartTimes[v], End: time.Now(), Rows: rows, } - exportersMu.Lock() - for e := range exporters { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + for e := range w.exporters { e.ExportView(viewData) } - exportersMu.Unlock() } -func (w *worker) reportUsage(now time.Time) { +func (w *worker) reportUsage() { w.mu.Lock() defer w.mu.Unlock() for _, v := range w.views { - w.reportView(v, now) + w.reportView(v) } } @@ -248,20 +390,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - - var startTime time.Time - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } else { - startTime = w.startTimes[v] - } - - return viewToMetric(v, now, startTime) + return viewToMetric(v, w.r, now) } // Read reads all view data and returns them as metrics. @@ -279,3 +408,17 @@ func (w *worker) Read() []*metricdata.Metric { } return metrics } + +func (w *worker) RegisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + w.exporters[e] = struct{}{} +} + +func (w *worker) UnregisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + delete(w.exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0267e179a..9ac4cc059 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -95,7 +95,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { } // Report pending data for this view before removing it. - w.reportView(vi, time.Now()) + w.reportView(vi) vi.unsubscribe() if !vi.isSubscribed() { @@ -103,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. 
vi.clearRows() } - w.unregisterView(name) + w.unregisterView(vi) } cmd.done <- struct{}{} } @@ -163,7 +163,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) } } } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34..8fb17226f 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56..e28cf13cd 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index 0c54492a2..c8e26ed63 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -49,6 +49,16 @@ type Attribute struct { value interface{} } +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + // BoolAttribute returns a bool-valued attribute. func BoolAttribute(key string, value bool) Attribute { return Attribute{key: key, value: value} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f3..7a1616a55 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. 
Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index dc7a295c7..80095a5f6 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := []interface{}{} + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go index c442d9902..e601f76f2 100644 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -48,8 +48,10 @@ func (i internalOnly) ReportActiveSpans(name string) []*SpanData { var out []*SpanData s.mu.Lock() defer s.mu.Unlock() - for span := range s.active { - out = append(out, span.makeSpanData()) + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } } return out } @@ -185,7 +187,7 @@ func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency t // bucketed by latency. type spanStore struct { mu sync.Mutex // protects everything below. - active map[*Span]struct{} + active map[SpanInterface]struct{} errors map[int32]*bucket latency []bucket maxSpansPerErrorBucket int @@ -194,7 +196,7 @@ type spanStore struct { // newSpanStore creates a span store. func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { s := &spanStore{ - active: make(map[*Span]struct{}), + active: make(map[SpanInterface]struct{}), latency: make([]bucket, len(defaultLatencies)+1), maxSpansPerErrorBucket: errorBucketSize, } @@ -271,7 +273,7 @@ func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { } // add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span *Span) { +func (s *spanStore) add(span SpanInterface) { s.mu.Lock() s.active[span] = struct{}{} s.mu.Unlock() @@ -279,7 +281,7 @@ func (s *spanStore) add(span *Span) { // finished removes a span from the active set, and adds a corresponding // SpanData to a latency or error bucket. -func (s *spanStore) finished(span *Span, sd *SpanData) { +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { latency := sd.EndTime.Sub(sd.StartTime) if latency < 0 { latency = 0 diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 3f8977b41..861df9d39 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -28,12 +28,16 @@ import ( "go.opencensus.io/trace/tracestate" ) +type tracer struct{} + +var _ Tracer = &tracer{} + // Span represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. -type Span struct { +type span struct { // data contains information recorded about the span. // // It will be non-nil if we are exporting the span or recording events for it. 
@@ -66,7 +70,7 @@ type Span struct { // IsRecordingEvents returns true if events are being recorded for this span. // Use this check to avoid computing expensive annotations when they will never // be used. -func (s *Span) IsRecordingEvents() bool { +func (s *span) IsRecordingEvents() bool { if s == nil { return false } @@ -109,13 +113,13 @@ type SpanContext struct { type contextKey struct{} // FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Span { +func (t *tracer) FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } // NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { +func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } @@ -166,12 +170,14 @@ func WithSampler(sampler Sampler) StartOption { // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext - if p := FromContext(ctx); p != nil { - p.addChild() - parent = p.spanContext + if p := t.FromContext(ctx); p != nil { + if ps, ok := p.internal.(*span); ok { + ps.addChild() + } + parent = p.SpanContext() } for _, op := range o { op(&opts) @@ -180,7 +186,8 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. @@ -190,7 +197,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. 
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { op(&opts) @@ -198,19 +205,24 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { - span := &Span{} - span.spanContext = parent +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } if !hasParent { - span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() } - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler if !hasParent || remoteParent || o.Sampler != nil { @@ -222,47 +234,47 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa if o.Sampler != nil { sampler = o.Sampler } - span.spanContext.setIsSampled(sampler(SamplingParameters{ + s.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, - TraceID: span.spanContext.TraceID, - SpanID: span.spanContext.SpanID, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, Name: name, HasRemoteParent: remoteParent}).Sample) } - if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { - return span + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s } - span.data = &SpanData{ - SpanContext: span.spanContext, + s.data = &SpanData{ + SpanContext: s.spanContext, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } - span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { - span.data.ParentSpanID = parent.SpanID + s.data.ParentSpanID = parent.SpanID } if internal.LocalSpanStoreEnabled { var ss *spanStore ss = spanStoreForNameCreateIfNew(name) if ss != nil { - span.spanStore = ss - ss.add(span) + s.spanStore = ss + ss.add(s) } } - return span + return s } // End ends the span. -func (s *Span) End() { +func (s *span) End() { if s == nil { return } @@ -292,7 +304,7 @@ func (s *Span) End() { // makeSpanData produces a SpanData representing the current state of the Span. // It requires that s.data is non-nil. 
-func (s *Span) makeSpanData() *SpanData { +func (s *span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data @@ -317,7 +329,7 @@ func (s *Span) makeSpanData() *SpanData { } // SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { +func (s *span) SpanContext() SpanContext { if s == nil { return SpanContext{} } @@ -325,7 +337,7 @@ func (s *Span) SpanContext() SpanContext { } // SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { +func (s *span) SetName(name string) { if !s.IsRecordingEvents() { return } @@ -335,7 +347,7 @@ func (s *Span) SetName(name string) { } // SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { +func (s *span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } @@ -344,32 +356,32 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -func (s *Span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0) +func (s *span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) } return linksArr } -func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0) +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) } return messageEventArr } -func (s *Span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0) +func (s *span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) } return annotationArr } -func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}) +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) if ok { @@ -380,13 +392,13 @@ func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { return attributes } -func (s *Span) copyToCappedAttributes(attributes []Attribute) { +func (s *span) copyToCappedAttributes(attributes []Attribute) { for _, a := range attributes { s.lruAttributes.add(a.key, a.value) } } -func (s *Span) addChild() { +func (s *span) addChild() { if !s.IsRecordingEvents() { return } @@ -398,7 +410,7 @@ func (s *Span) addChild() { // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { +func (s *span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } @@ -407,49 +419,27 @@ func (s *Span) AddAttributes(attributes ...Attribute) { s.mu.Unlock() } -// copyAttributes copies a slice of Attributes into a map. 
-func copyAttributes(m map[string]interface{}, attributes []Attribute) { - for _, a := range attributes { - m[a.key] = a.value - } -} - -func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { +func (s *span) printStringInternal(attributes []Attribute, str string) { now := time.Now() - msg := fmt.Sprintf(format, a...) - var m map[string]interface{} - s.mu.Lock() + var am map[string]interface{} if len(attributes) != 0 { - m = make(map[string]interface{}) - copyAttributes(m, attributes) + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } } - s.annotations.add(Annotation{ - Time: now, - Message: msg, - Attributes: m, - }) - s.mu.Unlock() -} - -func (s *Span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var a map[string]interface{} s.mu.Lock() - if len(attributes) != 0 { - a = make(map[string]interface{}) - copyAttributes(a, attributes) - } s.annotations.add(Annotation{ Time: now, Message: str, - Attributes: a, + Attributes: am, }) s.mu.Unlock() } // Annotate adds an annotation with attributes. // Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { +func (s *span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } @@ -457,11 +447,11 @@ func (s *Span) Annotate(attributes []Attribute, str string) { } // Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } - s.lazyPrintfInternal(attributes, format, a...) + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to the span. @@ -470,7 +460,7 @@ func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{} // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -492,7 +482,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -509,7 +499,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } // AddLink adds a link to the span. -func (s *Span) AddLink(l Link) { +func (s *span) AddLink(l Link) { if !s.IsRecordingEvents() { return } @@ -518,7 +508,7 @@ func (s *Span) AddLink(l Link) { s.mu.Unlock() } -func (s *Span) String() string { +func (s *span) String() string { if s == nil { return "" } @@ -534,20 +524,9 @@ func (s *Span) String() string { var config atomic.Value // access atomically func init() { - gen := &defaultIDGenerator{} - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - config.Store(&Config{ DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + IDGenerator: &defaultIDGenerator{}, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, @@ -571,6 +550,24 @@ type defaultIDGenerator struct { traceIDAdd [2]uint64 traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. +func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + }) } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go new file mode 100644 index 000000000..9e2c3a999 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_api.go @@ -0,0 +1,265 @@ +// Copyright 2020, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" +) + +// DefaultTracer is the tracer used when package-level exported functions are invoked. +var DefaultTracer Tracer = &tracer{} + +// Tracer can start spans and access context functions. +type Tracer interface { + + // StartSpan starts a new child span of the current span in the context. If + // there is no span in the context, creates a new trace and span. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) + + // StartSpanWithRemoteParent starts a new child span of the span from the given parent. + // + // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is + // preferred for cases where the parent is propagated via an incoming request. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) + + // FromContext returns the Span stored in a context, or nil if there isn't one. + FromContext(ctx context.Context) *Span + + // NewContext returns a new context with the given Span attached. + NewContext(parent context.Context, s *Span) context.Context +} + +// StartSpan starts a new child span of the current span in the context. 
If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpan(ctx, name, o...) +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) +} + +// FromContext returns the Span stored in a context, or a Span that is not +// recording events if there isn't one. +func FromContext(ctx context.Context) *Span { + return DefaultTracer.FromContext(ctx) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return DefaultTracer.NewContext(parent, s) +} + +// SpanInterface represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type SpanInterface interface { + + // IsRecordingEvents returns true if events are being recorded for this span. + // Use this check to avoid computing expensive annotations when they will never + // be used. + IsRecordingEvents() bool + + // End ends the span. + End() + + // SpanContext returns the SpanContext of the span. + SpanContext() SpanContext + + // SetName sets the name of the span, if it is recording events. + SetName(name string) + + // SetStatus sets the status of the span, if it is recording events. + SetStatus(status Status) + + // AddAttributes sets attributes in the span. + // + // Existing attributes whose keys appear in the attributes parameter are overwritten. + AddAttributes(attributes ...Attribute) + + // Annotate adds an annotation with attributes. + // Attributes can be nil. + Annotate(attributes []Attribute, str string) + + // Annotatef adds an annotation with attributes. + Annotatef(attributes []Attribute, format string, a ...interface{}) + + // AddMessageSendEvent adds a message send event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddMessageReceiveEvent adds a message receive event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddLink adds a link to the span. 
+ AddLink(l Link) + + // String prints a string representation of a span. + String() string +} + +// NewSpan is a convenience function for creating a *Span out of a *span +func NewSpan(s SpanInterface) *Span { + return &Span{internal: s} +} + +// Span is a struct wrapper around the SpanInt interface, which allows correctly handling +// nil spans, while also allowing the SpanInterface implementation to be swapped out. +type Span struct { + internal SpanInterface +} + +// Internal returns the underlying implementation of the Span +func (s *Span) Internal() SpanInterface { + return s.internal +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.internal.IsRecordingEvents() +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span. 
+func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. +func (s *Span) String() string { + if s == nil { + return "" + } + return s.internal.String() +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf28..b8fc1e495 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859..da488fc87 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE similarity index 94% rename from vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE rename to vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE index b9d6a27ea..261eeb9e9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go new file mode 100644 index 000000000..18436eaed --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -0,0 +1,287 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc/stats" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + // ScopeName is the instrumentation scope name. 
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. + GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. A InterceptorFilter must return true if +// the request should be traced. +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request in +// should be instrumented by the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool + +// config is a group of options for this instrumentation. +type config struct { + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue + + ReceivedEvent bool + SentEvent bool + + tracer trace.Tracer + meter metric.Meter + + rpcDuration metric.Float64Histogram + rpcRequestSize metric.Int64Histogram + rpcResponseSize metric.Int64Histogram + rpcRequestsPerRPC metric.Int64Histogram + rpcResponsesPerRPC metric.Int64Histogram +} + +// Option applies an option value for a config. +type Option interface { + apply(*config) +} + +// newConfig returns a config configured with all the passed Options. +func newConfig(opts []Option, role string) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + TracerProvider: otel.GetTracerProvider(), + MeterProvider: otel.GetMeterProvider(), + } + for _, o := range opts { + o.apply(c) + } + + c.tracer = c.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + c.meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration", + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms")) + if err != nil { + otel.Handle(err) + if c.rpcDuration == nil { + c.rpcDuration = noop.Float64Histogram{} + } + } + + c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + metric.WithDescription("Measures size of RPC request messages (uncompressed)."), + metric.WithUnit("By")) + if err != nil { + otel.Handle(err) + if c.rpcRequestSize == nil { + c.rpcRequestSize = noop.Int64Histogram{} + } + } + + c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + metric.WithDescription("Measures size of RPC response messages (uncompressed)."), + metric.WithUnit("By")) + if err != nil { + otel.Handle(err) + if c.rpcResponseSize == nil { + c.rpcResponseSize = noop.Int64Histogram{} + } + } + + c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + metric.WithDescription("Measures the number of messages received per RPC. 
Should be 1 for all non-streaming RPCs."), + metric.WithUnit("{count}")) + if err != nil { + otel.Handle(err) + if c.rpcRequestsPerRPC == nil { + c.rpcRequestsPerRPC = noop.Int64Histogram{} + } + } + + c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), + metric.WithUnit("{count}")) + if err != nil { + otel.Handle(err) + if c.rpcResponsesPerRPC == nil { + c.rpcResponsesPerRPC = noop.Int64Histogram{} + } + } + + return c +} + +type propagatorsOption struct{ p propagation.TextMapPropagator } + +func (o propagatorsOption) apply(c *config) { + if o.p != nil { + c.Propagators = o.p + } +} + +// WithPropagators returns an Option to use the Propagators when extracting +// and injecting trace context from requests. +func WithPropagators(p propagation.TextMapPropagator) Option { + return propagatorsOption{p: p} +} + +type tracerProviderOption struct{ tp trace.TracerProvider } + +func (o tracerProviderOption) apply(c *config) { + if o.tp != nil { + c.TracerProvider = o.tp + } +} + +// WithInterceptorFilter returns an Option to use the request filter. +// +// Deprecated: Use stats handlers instead. +func WithInterceptorFilter(f InterceptorFilter) Option { + return interceptorFilterOption{f: f} +} + +type interceptorFilterOption struct { + f InterceptorFilter +} + +func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { + if o.f != nil { + c.Filter = o.f + } +} + +// WithTracerProvider returns an Option to use the TracerProvider when +// creating a Tracer. +func WithTracerProvider(tp trace.TracerProvider) Option { + return tracerProviderOption{tp: tp} +} + +type meterProviderOption struct{ mp metric.MeterProvider } + +func (o meterProviderOption) apply(c *config) { + if o.mp != nil { + c.MeterProvider = o.mp + } +} + +// WithMeterProvider returns an Option to use the MeterProvider when +// creating a Meter. If this option is not provide the global MeterProvider will be used. +func WithMeterProvider(mp metric.MeterProvider) Option { + return meterProviderOption{mp: mp} +} + +// Event type that can be recorded, see WithMessageEvents. +type Event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReceivedEvents Event = iota + SentEvents +) + +type messageEventsProviderOption struct { + events []Event +} + +func (m messageEventsProviderOption) apply(c *config) { + for _, e := range m.events { + switch e { + case ReceivedEvents: + c.ReceivedEvent = true + case SentEvents: + c.SentEvent = true + } + } +} + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. +// +// Valid events are: +// - ReceivedEvents: Record the number of bytes read after every gRPC read operation. +// - SentEvents: Record the number of bytes written after every gRPC write operation. +func WithMessageEvents(events ...Event) Option { + return messageEventsProviderOption{events: events} +} + +type spanStartOption struct{ opts []trace.SpanStartOption } + +func (o spanStartOption) apply(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) 
+} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return spanStartOption{opts} +} + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go new file mode 100644 index 000000000..b8b836b00 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. + +Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client. + +Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server. +*/ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go new file mode 100644 index 000000000..7d5ed0580 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -0,0 +1,530 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// gRPC tracing middleware +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +type messageType attribute.KeyValue + +// Event adds an event of the messageType to the span associated with the +// passed context with a message id. 
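The package doc above points callers at NewClientHandler / NewServerHandler (added in stats_handler.go further down) rather than the interceptors that follow; a minimal sketch of that wiring, assuming a global TracerProvider and propagator have been installed elsewhere, with the target address and plaintext credentials as placeholders:

```
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// newServer traces and measures every RPC via the server-side stats handler;
// WithMessageEvents additionally records per-message send/receive events.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.StatsHandler(otelgrpc.NewServerHandler(
			otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
		)),
	)
}

// newClientConn does the same on the client side.
func newClientConn() (*grpc.ClientConn, error) {
	return grpc.NewClient(
		"localhost:9000",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
}
```

grpc.NewClient is the non-blocking successor to grpc.Dial in recent grpc-go; the interceptor constructors below remain only for callers that have not migrated yet.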
+func (m messageType) Event(ctx context.Context, id int, _ interface{}) { + span := trace.SpanFromContext(ctx) + if !span.IsRecording() { + return + } + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + )) +} + +var ( + messageSent = messageType(RPCMessageTypeSent) + messageReceived = messageType(RPCMessageTypeReceived) +) + +// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.NewClient call. +// +// Deprecated: Use [NewClientHandler] instead. +func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + cfg := newConfig(opts, "client") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + callOpts ...grpc.CallOption, + ) error { + i := &InterceptorInfo{ + Method: method, + Type: UnaryClient, + } + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { + return invoker(ctx, method, req, reply, cc, callOpts...) + } + + name, attr, _ := telemetryAttributes(method, cc.Target()) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + ctx, + name, + startOpts..., + ) + defer span.End() + + ctx = inject(ctx, cfg.Propagators) + + if cfg.SentEvent { + messageSent.Event(ctx, 1, req) + } + + err := invoker(ctx, method, req, reply, cc, callOpts...) + + if cfg.ReceivedEvent { + messageReceived.Event(ctx, 1, reply) + } + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and +// SendMsg method call. 
+type clientStream struct { + grpc.ClientStream + desc *grpc.StreamDesc + + span trace.Span + + receivedEvent bool + sentEvent bool + + receivedMessageID int + sentMessageID int +} + +var _ = proto.Marshal + +func (w *clientStream) RecvMsg(m interface{}) error { + err := w.ClientStream.RecvMsg(m) + + if err == nil && !w.desc.ServerStreams { + w.endSpan(nil) + } else if errors.Is(err, io.EOF) { + w.endSpan(nil) + } else if err != nil { + w.endSpan(err) + } else { + w.receivedMessageID++ + + if w.receivedEvent { + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + } + + return err +} + +func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ + + if w.sentEvent { + messageSent.Event(w.Context(), w.sentMessageID, m) + } + + if err != nil { + w.endSpan(err) + } + + return err +} + +func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() + if err != nil { + w.endSpan(err) + } + + return md, err +} + +func (w *clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() + if err != nil { + w.endSpan(err) + } + + return err +} + +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { + return &clientStream{ + ClientStream: s, + span: span, + desc: desc, + receivedEvent: cfg.ReceivedEvent, + sentEvent: cfg.SentEvent, + } +} + +func (w *clientStream) endSpan(err error) { + if err != nil { + s, _ := status.FromError(err) + w.span.SetStatus(codes.Error, s.Message()) + w.span.SetAttributes(statusCodeAttr(s.Code())) + } else { + w.span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + w.span.End() +} + +// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.NewClient call. +// +// Deprecated: Use [NewClientHandler] instead. +func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + cfg := newConfig(opts, "client") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + callOpts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + i := &InterceptorInfo{ + Method: method, + Type: StreamClient, + } + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { + return streamer(ctx, desc, cc, method, callOpts...) + } + + name, attr, _ := telemetryAttributes(method, cc.Target()) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + ctx, + name, + startOpts..., + ) + + ctx = inject(ctx, cfg.Propagators) + + s, err := streamer(ctx, desc, cc, method, callOpts...) + if err != nil { + grpcStatus, _ := status.FromError(err) + span.SetStatus(codes.Error, grpcStatus.Message()) + span.SetAttributes(statusCodeAttr(grpcStatus.Code())) + span.End() + return s, err + } + stream := wrapClientStream(s, desc, span, cfg) + return stream, nil + } +} + +// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// Deprecated: Use [NewServerHandler] instead. 
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + cfg := newConfig(opts, "server") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + i := &InterceptorInfo{ + UnaryServerInfo: info, + Type: UnaryServer, + } + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { + return handler(ctx, req) + } + + ctx = extract(ctx, cfg.Propagators) + name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + startOpts..., + ) + defer span.End() + + if cfg.ReceivedEvent { + messageReceived.Event(ctx, 1, req) + } + + before := time.Now() + + resp, err := handler(ctx, req) + + s, _ := status.FromError(err) + if err != nil { + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + if cfg.SentEvent { + messageSent.Event(ctx, 1, s.Proto()) + } + } else { + if cfg.SentEvent { + messageSent.Event(ctx, 1, resp) + } + } + grpcStatusCodeAttr := statusCodeAttr(s.Code()) + span.SetAttributes(grpcStatusCodeAttr) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) + + metricAttrs = append(metricAttrs, grpcStatusCodeAttr) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + + return resp, err + } +} + +// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverStream struct { + grpc.ServerStream + ctx context.Context + + receivedMessageID int + sentMessageID int + + receivedEvent bool + sentEvent bool +} + +func (w *serverStream) Context() context.Context { + return w.ctx +} + +func (w *serverStream) RecvMsg(m interface{}) error { + err := w.ServerStream.RecvMsg(m) + + if err == nil { + w.receivedMessageID++ + if w.receivedEvent { + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + } + + return err +} + +func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ + if w.sentEvent { + messageSent.Event(w.Context(), w.sentMessageID, m) + } + + return err +} + +func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { + return &serverStream{ + ServerStream: ss, + ctx: ctx, + receivedEvent: cfg.ReceivedEvent, + sentEvent: cfg.SentEvent, + } +} + +// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// Deprecated: Use [NewServerHandler] instead. 
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + cfg := newConfig(opts, "server") + tracer := cfg.TracerProvider.Tracer( + ScopeName, + trace.WithInstrumentationVersion(Version()), + ) + + return func( + srv interface{}, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx := ss.Context() + i := &InterceptorInfo{ + StreamServerInfo: info, + Type: StreamServer, + } + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { + return handler(srv, wrapServerStream(ctx, ss, cfg)) + } + + ctx = extract(ctx, cfg.Propagators) + name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) + + startOpts := append([]trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + }, + cfg.SpanStartOptions..., + ) + + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + startOpts..., + ) + defer span.End() + + err := handler(srv, wrapServerStream(ctx, ss, cfg)) + if err != nil { + s, _ := status.FromError(err) + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// telemetryAttributes returns a span name and span and metric attributes from +// the gRPC method and peer address. +func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) { + name, methodAttrs := internal.ParseFullMethod(fullMethod) + peerAttrs := peerAttr(peerAddress) + + attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs)) + attrs = append(attrs, RPCSystemGRPC) + attrs = append(attrs, methodAttrs...) + metricAttrs := attrs[:1+len(methodAttrs)] + attrs = append(attrs, peerAttrs...) + return name, attrs, metricAttrs +} + +// peerAttr returns attributes about the peer address. +func peerAttr(addr string) []attribute.KeyValue { + host, p, err := net.SplitHostPort(addr) + if err != nil { + return nil + } + + if host == "" { + host = "127.0.0.1" + } + port, err := strconv.Atoi(p) + if err != nil { + return nil + } + + var attr []attribute.KeyValue + if ip := net.ParseIP(host); ip != nil { + attr = []attribute.KeyValue{ + semconv.NetSockPeerAddr(host), + semconv.NetSockPeerPort(port), + } + } else { + attr = []attribute.KeyValue{ + semconv.NetPeerName(host), + semconv.NetPeerPort(port), + } + } + + return attr +} + +// peerFromCtx returns a peer address from a context, if one exists. +func peerFromCtx(ctx context.Context) string { + p, ok := peer.FromContext(ctx) + if !ok { + return "" + } + return p.Addr.String() +} + +// statusCodeAttr returns status code attribute based on given gRPC code. +func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) +} + +// serverStatus returns a span status code and message for a given gRPC +// status code. It maps specific gRPC status codes to a corresponding span +// status code and message. This function is intended for use on the server +// side of a gRPC connection. +// +// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented, +// Internal, Unavailable, or DataLoss, it returns a span status code of Error +// and the message from the gRPC status. Otherwise, it returns a span status +// code of Unset and an empty message. 
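For comparison, the deprecated interceptor API that these functions keep alive wires up as below; new code should prefer the stats handlers sketched earlier:

```
package example

import (
	"google.golang.org/grpc"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// newServerWithInterceptors mirrors what existing callers of the deprecated
// API look like before migrating to grpc.StatsHandler.
func newServerWithInterceptors() *grpc.Server {
	return grpc.NewServer(
		grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),   //nolint:staticcheck // deprecated, shown for comparison
		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()), //nolint:staticcheck // deprecated, shown for comparison
	)
}
```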
+func serverStatus(grpcStatus *status.Status) (codes.Code, string) { + switch grpcStatus.Code() { + case grpc_codes.Unknown, + grpc_codes.DeadlineExceeded, + grpc_codes.Unimplemented, + grpc_codes.Internal, + grpc_codes.Unavailable, + grpc_codes.DataLoss: + return codes.Error, grpcStatus.Message() + default: + return codes.Unset, "" + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go new file mode 100644 index 000000000..b62f7cd7c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc" +) + +// InterceptorType is the flag to define which gRPC interceptor +// the InterceptorInfo object is. +type InterceptorType uint8 + +const ( + // UndefinedInterceptor is the type for the interceptor information that is not + // well initialized or categorized to other types. + UndefinedInterceptor InterceptorType = iota + // UnaryClient is the type for grpc.UnaryClient interceptor. + UnaryClient + // StreamClient is the type for grpc.StreamClient interceptor. + StreamClient + // UnaryServer is the type for grpc.UnaryServer interceptor. + UnaryServer + // StreamServer is the type for grpc.StreamServer interceptor. + StreamServer +) + +// InterceptorInfo is the union of some arguments to four types of +// gRPC interceptors. +type InterceptorInfo struct { + // Method is method name registered to UnaryClient and StreamClient + Method string + // UnaryServerInfo is the metadata for UnaryServer + UnaryServerInfo *grpc.UnaryServerInfo + // StreamServerInfo if the metadata for StreamServer + StreamServerInfo *grpc.StreamServerInfo + // Type is the type for interceptor + Type InterceptorType +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go new file mode 100644 index 000000000..bef07b7a3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + +import ( + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// ParseFullMethod returns a span name following the OpenTelemetry semantic +// conventions as well as all applicable span attribute.KeyValue attributes based +// on a gRPC's FullMethod. +// +// Parsing is consistent with grpc-go implementation: +// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39 +func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { + if !strings.HasPrefix(fullMethod, "/") { + // Invalid format, does not follow `/package.service/method`. + return fullMethod, nil + } + name := fullMethod[1:] + pos := strings.LastIndex(name, "/") + if pos < 0 { + // Invalid format, does not follow `/package.service/method`. 
+ return name, nil + } + service, method := name[:pos], name[pos+1:] + + var attrs []attribute.KeyValue + if service != "" { + attrs = append(attrs, semconv.RPCService(service)) + } + if method != "" { + attrs = append(attrs, semconv.RPCMethod(method)) + } + return name, attrs +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go new file mode 100644 index 000000000..3aa37915d --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface. +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +// Deprecated: Unnecessary public func. +func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { + c := newConfig(opts, "") + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: md, + }) +} + +func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + propagators.Inject(ctx, &metadataSupplier{ + metadata: &md, + }) + return metadata.NewOutgoingContext(ctx, md) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. +// This function is meant to be used on incoming requests. +// Deprecated: Unnecessary public func. 
+func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts, "") + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: md, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + return propagators.Extract(ctx, &metadataSupplier{ + metadata: &md, + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go new file mode 100644 index 000000000..409c621b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// Semantic conventions for attribute keys for gRPC. +const ( + // Name of message transmitted or received. + RPCNameKey = attribute.Key("name") + + // Type of message transmitted or received. + RPCMessageTypeKey = attribute.Key("message.type") + + // Identifier of message transmitted or received. + RPCMessageIDKey = attribute.Key("message.id") + + // The compressed size of the message transmitted or received in bytes. + RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // The uncompressed size of the message transmitted or received in + // bytes. + RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +// Semantic conventions for common RPC attributes. +var ( + // Semantic convention for gRPC as the remoting system. + RPCSystemGRPC = semconv.RPCSystemGRPC + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") + + // Semantic conventions for RPC message types. 
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go new file mode 100644 index 000000000..fbcbfb84e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -0,0 +1,222 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + "sync/atomic" + "time" + + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +type gRPCContextKey struct{} + +type gRPCContext struct { + messagesReceived int64 + messagesSent int64 + metricAttrs []attribute.KeyValue + record bool +} + +type serverHandler struct { + *config +} + +// NewServerHandler creates a stats.Handler for a gRPC server. +func NewServerHandler(opts ...Option) stats.Handler { + h := &serverHandler{ + config: newConfig(opts, "server"), + } + + return h +} + +// TagConn can attach some information to the given context. +func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn processes the Conn stats. +func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { +} + +// TagRPC can attach some information to the given context. +func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + ctx = extract(ctx, h.config.Propagators) + + name, attrs := internal.ParseFullMethod(info.FullMethodName) + attrs = append(attrs, RPCSystemGRPC) + ctx, _ = h.tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), + ) + + gctx := gRPCContext{ + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) + } + return context.WithValue(ctx, gRPCContextKey{}, &gctx) +} + +// HandleRPC processes the RPC stats. +func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + isServer := true + h.handleRPC(ctx, rs, isServer) +} + +type clientHandler struct { + *config +} + +// NewClientHandler creates a stats.Handler for a gRPC client. +func NewClientHandler(opts ...Option) stats.Handler { + h := &clientHandler{ + config: newConfig(opts, "client"), + } + + return h +} + +// TagRPC can attach some information to the given context. 
+func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + name, attrs := internal.ParseFullMethod(info.FullMethodName) + attrs = append(attrs, RPCSystemGRPC) + ctx, _ = h.tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), + ) + + gctx := gRPCContext{ + metricAttrs: append(attrs, h.config.MetricAttributes...), + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) + } + + return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) +} + +// HandleRPC processes the RPC stats. +func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + isServer := false + h.handleRPC(ctx, rs, isServer) +} + +// TagConn can attach some information to the given context. +func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn processes the Conn stats. +func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { + // no-op +} + +func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag. + span := trace.SpanFromContext(ctx) + var metricAttrs []attribute.KeyValue + var messageId int64 + + gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + if gctx != nil { + if !gctx.record { + return + } + metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) + metricAttrs = append(metricAttrs, gctx.metricAttrs...) + } + + switch rs := rs.(type) { + case *stats.Begin: + case *stats.InPayload: + if gctx != nil { + messageId = atomic.AddInt64(&gctx.messagesReceived, 1) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + } + + if c.ReceivedEvent { + span.AddEvent("message", + trace.WithAttributes( + semconv.MessageTypeReceived, + semconv.MessageIDKey.Int64(messageId), + semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.MessageUncompressedSizeKey.Int(rs.Length), + ), + ) + } + case *stats.OutPayload: + if gctx != nil { + messageId = atomic.AddInt64(&gctx.messagesSent, 1) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + } + + if c.SentEvent { + span.AddEvent("message", + trace.WithAttributes( + semconv.MessageTypeSent, + semconv.MessageIDKey.Int64(messageId), + semconv.MessageCompressedSizeKey.Int(rs.CompressedLength), + semconv.MessageUncompressedSizeKey.Int(rs.Length), + ), + ) + } + case *stats.OutTrailer: + case *stats.OutHeader: + if p, ok := peer.FromContext(ctx); ok { + span.SetAttributes(peerAttr(p.Addr.String())...) + } + case *stats.End: + var rpcStatusAttr attribute.KeyValue + + if rs.Error != nil { + s, _ := status.FromError(rs.Error) + if isServer { + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + } else { + span.SetStatus(codes.Error, s.Message()) + } + rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) + } else { + rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) + } + span.SetAttributes(rpcStatusAttr) + span.End() + + metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} + + // Use floating point division here for higher precision (instead of Millisecond method). 
+ // Measure right before calling Record() to capture as much elapsed time as possible. + elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) + + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) + if gctx != nil { + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) + } + default: + return + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go new file mode 100644 index 000000000..51d28156b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// Version is the current release version of the gRPC instrumentation. +func Version() string { + return "0.55.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 000000000..6aae83bfd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of initialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. +var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. 
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 000000000..5d6e6156b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. 
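A short sketch of the two client-side styles above: the package-level helpers that go through DefaultClient, and wrapping an existing RoundTripper with NewTransport for a custom client (the URL is a placeholder):

```
package example

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// fetch issues the same request twice: once via the convenience helper and
// once via an explicitly wrapped client.
func fetch(ctx context.Context) error {
	// otelhttp.Get adds a client span around the request.
	resp, err := otelhttp.Get(ctx, "https://example.com/health")
	if err != nil {
		return err
	}
	resp.Body.Close()

	// For long-lived clients, wrap the transport once and reuse it.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com/health", nil)
	if err != nil {
		return err
	}
	resp, err = client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```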
+type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 000000000..a01bfafbe --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel/attribute" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. 
+func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. +// Filters will be invoked for each processed request, it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. +// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
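Editor's note: the options defined in config.go above compose like this when wrapping a handler with NewHandler (defined in handler.go just below). A minimal sketch; the listen address, server name, span-name format, and the /healthz path are illustrative choices, not anything mandated by the package.

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// Compose options from config.go: skip health checks, record read/write
	// events, and name spans "METHOD /path" instead of the bare operation name.
	wrapped := otelhttp.NewHandler(hello, "hello",
		otelhttp.WithFilter(func(r *http.Request) bool { return r.URL.Path != "/healthz" }),
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			return r.Method + " " + r.URL.Path
		}),
		otelhttp.WithServerName("api.internal"),
	)

	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```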
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 000000000..56b24b982 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 000000000..33580a35b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + + semconv semconv.HTTPServer +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ h.configure(c) + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), + } + + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, readRecordFunc) + if r.Body != nil && r.Body != http.NoBody { + r.Body = bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := request.NewRespWriterWrapper(w, writeRecordFunc) + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). 
+ + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, + }) + + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } + + next.ServeHTTP(w, r.WithContext(ctx)) + + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), + })...) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. +func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer(nil).Route(route) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 000000000..a945f5566 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. 
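Editor's note: WithRouteTag, defined above, sets the http.route attribute on the active span and on the request labels, which keeps metric cardinality bounded even when concrete URLs embed IDs. A hedged sketch of pairing it with a plain ServeMux; the route pattern string is only a label here, not something the package parses.

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()

	// Annotate every request matched by this prefix with a stable route label.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		},
	)))

	// Wrap the whole mux so every request gets a server span first; WithRouteTag
	// then adds the matched route to that span and its metrics.
	log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server")))
}
```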
+func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 000000000..fbc344cbd --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. 
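Editor's note: BodyWrapper and RespWriterWrapper live in an internal package and cannot be imported directly; the pattern they implement is a counting wrapper that forwards I/O and reports byte counts through a callback. A standalone illustration of that pattern under hypothetical names (countingReader is not part of the package):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader mirrors what BodyWrapper does for request bodies: it forwards
// Reads to the underlying reader and reports each byte count to a callback.
type countingReader struct {
	r      io.Reader
	onRead func(int64)
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.onRead(int64(n))
	return n, err
}

func main() {
	var total int64
	cr := &countingReader{
		r:      strings.NewReader("hello, instrumented body"),
		onRead: func(n int64) { total += n },
	}
	_, _ = io.Copy(io.Discard, cr)
	fmt.Println("bytes read:", total) // bytes read: 24
}
```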
+func (w *RespWriterWrapper) Flush() { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 000000000..9cae4cab8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,165 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. 
+func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (s HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) 
+ } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 000000000..745b8c67b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
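Editor's note: the old/new semantic-convention split in env.go above is controlled by the OTEL_SEMCONV_STABILITY_OPT_IN environment variable, which NewHTTPServer/NewHTTPClient read when the instrumentation is constructed. A sketch of opting into duplicated (v1.20 plus v1.26) span attributes; in this vendored version the duplication covers trace attributes, while the metric side is still marked TODO in env.go. The address and handler below are placeholders.

```go
package main

import (
	"net/http"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Normally set in the process environment, e.g.
	//   OTEL_SEMCONV_STABILITY_OPT_IN=http/dup ./server
	// Setting it before the handler is constructed has the same effect, because
	// the env var is read when the instrumentation is built.
	_ = os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")

	h := otelhttp.NewHandler(http.NotFoundHandler(), "server")
	_ = http.ListenAndServe(":8080", h)
}
```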
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 000000000..e6e14924f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) // nolint: gosec // Byte size checked 16 above. 
+} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 000000000..c999b05e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + "slices" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
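Editor's note: the splitHostPort, requiredHTTPPort, and netProtocol helpers above normalize Host-style values and r.Proto before they become span attributes. They are internal to the package, so the standalone sketch below only reproduces their observable behavior for a few representative inputs; hostPort is a hypothetical stand-in, not the real helper.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// hostPort approximates splitHostPort for common inputs: it returns the host
// and a negative port when no parsable port is present.
func hostPort(hostport string) (string, int) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return hostport, -1 // no port present
	}
	p, err := strconv.Atoi(portStr)
	if err != nil {
		return host, -1
	}
	return host, p
}

func main() {
	for _, in := range []string{"example.com", "example.com:8080", "[2001:db8::1]:443", ":8443"} {
		h, p := hostPort(in)
		fmt.Printf("%-20q -> host=%q port=%d\n", in, h, p)
	}

	// netProtocol splits r.Proto: "HTTP/1.1" -> name "http", version "1.1".
	name, version, _ := strings.Cut("HTTP/1.1", "/")
	fmt.Println(strings.ToLower(name), version)
}
```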
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 000000000..7aa5f99e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 000000000..a73bb06e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,575 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +// HTTPClientResponse returns trace attributes for an HTTP response received by a +// client from a server. It will return the following attributes if the related +// values are defined in resp: "http.status.code", +// "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) +func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// HTTPClientRequest returns trace attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "user_agent.original", +// "http.request_content_length". +func HTTPClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the +// related values are defined in req: "net.peer.port". +func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { + return hc.ClientRequestMetrics(req) +} + +// HTTPClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func HTTPClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// HTTPServerRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.target", "net.host.name". The following attributes are returned if +// they related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip". 
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + +// HTTPServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func HTTPServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// httpConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type httpConv struct { + NetConv *netConv + + HTTPClientIPKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + UserAgentOriginalKey attribute.Key +} + +var hc = &httpConv{ + NetConv: nc, + + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + UserAgentOriginalKey: semconv.UserAgentOriginalKey, +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) 
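Editor's note: HTTPServerStatus and HTTPClientStatus encode the asymmetry called out in the comments above: a 4xx marks the client span as an error but leaves the server span unset, and only 5xx (or out-of-range codes) mark a server span as an error. A standalone sketch of that mapping; the real helpers are internal to this package, so spanStatus here is only an illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// spanStatus mirrors the documented rules: servers treat only 5xx (and invalid
// codes) as span errors, while clients also treat 4xx as errors.
func spanStatus(statusCode int, isServer bool) codes.Code {
	if statusCode < 100 || statusCode >= 600 {
		return codes.Error // invalid HTTP status code
	}
	threshold := 400
	if isServer {
		threshold = 500
	}
	if statusCode >= threshold {
		return codes.Error
	}
	return codes.Unset
}

func main() {
	fmt.Println(spanStatus(404, true))  // Unset: a 404 is not the server's fault
	fmt.Println(spanStatus(404, false)) // Error: the client's request failed
	fmt.Println(spanStatus(503, true))  // Error
}
```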
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.status_code int + http.response_content_length int + */ + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "user_agent.original", +// "http.request_content_length", "user_agent.original". +func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + user_agent.original string + http.url string + net.peer.name string + net.peer.port int + http.request_content_length int + */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. See ClientResponse. + http.response_content_length This requires the response. See ClientResponse. + net.sock.family This requires the socket used. + net.sock.peer.addr This requires the socket used. + net.sock.peer.name This requires the socket used. + net.sock.peer.port This requires the socket used. + http.resend_count This is something outside of a single request. + net.protocol.name The value is the Request is ignored, and the go client will always use "http". + net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. + */ + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + return attrs +} + +// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.method", "net.peer.name". +// The following attributes are returned if the related values +// are defined in req: "net.peer.port". +func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. 
+ var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) + + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.target", "net.host.name". The following attributes are returned if they +// related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip", +// "net.protocol.name", "net.protocol.version". +func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.scheme string + net.host.name string + net.host.port int + net.sock.peer.addr string + net.sock.peer.port int + user_agent.original string + http.client_ip string + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + http.target string Note: doesn't include the query parameter. + */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. + http.request_content_length This requires the len() of body, which can mutate it. + http.response_content_length This requires the response. + http.route This is not available. + net.sock.peer.name This would require a DNS lookup. + net.sock.host.addr The request doesn't have access to the underlying socket. + net.sock.host.port The request doesn't have access to the underlying socket. + + */ + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + + var target string + if req.URL != nil { + target = req.URL.Path + if target != "" { + n++ + } + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + n++ + } + if protoVersion != "" { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + if target != "" { + attrs = append(attrs, c.HTTPTargetKey.String(target)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + + return attrs +} + +// ServerRequestMetrics returns metric attributes for an HTTP request received +// by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "net.host.name". The following attributes are returned if they related +// values are defined in req: "net.host.port". +func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.scheme string + http.route string + http.method string + http.status_code int + net.host.name string + net.host.port int + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + */ + + n := 3 // Method, scheme, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *httpConv) ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go new file mode 100644 index 000000000..b80a1db61 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -0,0 +1,205 @@ +// Code created by gotmpl. DO NOT MODIFY. 
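An aside on the two status helpers defined just above: ClientStatus and ServerStatus differ only in how they classify 4xx responses, which is easy to miss when reading the span status logic. A minimal sketch of a table test that pins down that asymmetry, assuming the package-level `*httpConv` value (referred to here as `hc`) defined elsewhere in this vendored package:

```go
package semconvutil

import (
	"testing"

	"go.opentelemetry.io/otel/codes"
)

// TestStatus4xxAsymmetry is illustrative only: 4xx is an error for a client
// span but not for a server span, while 5xx is an error for both.
func TestStatus4xxAsymmetry(t *testing.T) {
	// hc is assumed to be the package's existing *httpConv value.
	if c, _ := hc.ClientStatus(404); c != codes.Error {
		t.Errorf("ClientStatus(404) = %v, want codes.Error", c)
	}
	if c, _ := hc.ServerStatus(404); c != codes.Unset {
		t.Errorf("ServerStatus(404) = %v, want codes.Unset", c)
	}
	if c, _ := hc.ServerStatus(503); c != codes.Error {
		t.Errorf("ServerStatus(503) = %v, want codes.Error", c)
	}
}
```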
+// source: internal/shared/semconvutil/netconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +// NetTransport returns a trace attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func NetTransport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// netConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(p)) + } + return attrs +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +// Peer returns attributes for a network peer address. 
+func (c *netConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(p)) + } + return attrs +} + +func (c *netConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *netConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *netConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go new file mode 100644 index 000000000..ea504e396 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" +) + +// Labeler is used to allow instrumented HTTP handlers to add custom attributes to +// the metrics recorded by the net/http instrumentation. +type Labeler struct { + mu sync.Mutex + attributes []attribute.KeyValue +} + +// Add attributes to a Labeler. +func (l *Labeler) Add(ls ...attribute.KeyValue) { + l.mu.Lock() + defer l.mu.Unlock() + l.attributes = append(l.attributes, ls...) +} + +// Get returns a copy of the attributes added to the Labeler. +func (l *Labeler) Get() []attribute.KeyValue { + l.mu.Lock() + defer l.mu.Unlock() + ret := make([]attribute.KeyValue, len(l.attributes)) + copy(ret, l.attributes) + return ret +} + +type labelerContextKeyType int + +const lablelerContextKey labelerContextKeyType = 0 + +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. 
Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) +} + +// LabelerFromContext retrieves a Labeler instance from the provided context if +// one is available. If no Labeler was found in the provided context a new, empty +// Labeler is returned and the second return value is false. In this case it is +// safe to use the Labeler but any attributes added to it will not be used. +func LabelerFromContext(ctx context.Context) (*Labeler, bool) { + l, ok := ctx.Value(lablelerContextKey).(*Labeler) + if !ok { + l = &Labeler{} + } + return l, ok +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go new file mode 100644 index 000000000..b4119d343 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -0,0 +1,295 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/http/httptrace" + "sync/atomic" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + + "go.opentelemetry.io/otel/trace" +) + +// Transport implements the http.RoundTripper interface and wraps +// outbound HTTP(S) requests with a span and enriches it with metrics. +type Transport struct { + rt http.RoundTripper + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPClient + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +} + +var _ http.RoundTripper = &Transport{} + +// NewTransport wraps the provided http.RoundTripper with one that +// starts a span, injects the span context into the outbound request headers, +// and enriches it with metrics. +// +// If the provided http.RoundTripper is nil, http.DefaultTransport will be used +// as the base http.RoundTripper. +func NewTransport(base http.RoundTripper, opts ...Option) *Transport { + if base == nil { + base = http.DefaultTransport + } + + t := Transport{ + rt: base, + semconv: semconv.NewHTTPClient(), + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
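For readers new to the Labeler defined above: it is the hook that lets application handlers attach extra attributes to the metrics this instrumentation records. A minimal usage sketch, assuming the service wraps its handler with otelhttp.NewHandler (which places a Labeler in the request context); the route, port, and attribute key below are illustrative, not taken from this patch:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	signHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The Labeler was placed in the context by the otelhttp wrapper;
		// attributes added here end up on the handler's metrics.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("signer.id", "example")) // illustrative attribute
		w.WriteHeader(http.StatusOK)
	})

	// NewHandler wraps signHandler so requests are traced and metered.
	http.Handle("/sign", otelhttp.NewHandler(signHandler, "sign"))
	_ = http.ListenAndServe(":8080", nil)
}
```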
+ t.applyConfig(c) + t.createMeasures() + + return &t +} + +func (t *Transport) applyConfig(c *config) { + t.tracer = c.Tracer + t.meter = c.Meter + t.propagators = c.Propagators + t.spanStartOptions = c.SpanStartOptions + t.filters = c.Filters + t.spanNameFormatter = c.SpanNameFormatter + t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn +} + +func (t *Transport) createMeasures() { + var err error + t.requestBytesCounter, err = t.meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + t.responseBytesCounter, err = t.meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + t.latencyMeasure, err = t.meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) +} + +func defaultTransportFormatter(_ string, r *http.Request) string { + return "HTTP " + r.Method +} + +// RoundTrip creates a Span and propagates its context via the provided request's headers +// before handing the request to the configured base RoundTripper. The created span will +// end when the response body is closed or when a read from the body returns io.EOF. +func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + requestStartTime := time.Now() + for _, f := range t.filters { + if !f(r) { + // Simply pass through to the base RoundTripper if a filter rejects the request + return t.rt.RoundTrip(r) + } + } + + tracer := t.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options + + ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) + + if t.clientTrace != nil { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } + + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. + + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) + if r.Body != nil && r.Body != http.NoBody { + r.Body = bw + } + + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) + if err != nil { + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + + span.SetStatus(codes.Error, err.Error()) + span.End() + return res, err + } + + // metrics + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) 
+ if res.StatusCode > 0 { + metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) + } + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) + // For handling response bytes we leverage a callback when the client reads the http response + readRecordFunc := func(n int64) { + t.responseBytesCounter.Add(ctx, n, o) + } + + // traces + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) + + res.Body = newWrappedBody(span, readRecordFunc, res.Body) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + t.latencyMeasure.Record(ctx, elapsedTime, o) + + return res, err +} + +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + +// newWrappedBody returns a new and appropriately scoped *wrappedBody as an +// io.ReadCloser. If the passed body implements io.Writer, the returned value +// will implement io.ReadWriteCloser. +func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser { + // The successful protocol switch responses will have a body that + // implement an io.ReadWriteCloser. Ensure this interface type continues + // to be satisfied if that is the case. + if _, ok := body.(io.ReadWriteCloser); ok { + return &wrappedBody{span: span, record: record, body: body} + } + + // Remove the implementation of the io.ReadWriteCloser and only implement + // the io.ReadCloser. + return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}} +} + +// wrappedBody is the response body type returned by the transport +// instrumentation to complete a span. Errors encountered when using the +// response body are recorded in span tracking the response. +// +// The span tracking the response is ended when this body is closed. +// +// If the response body implements the io.Writer interface (i.e. for +// successful protocol switches), the wrapped body also will. +type wrappedBody struct { + span trace.Span + recorded atomic.Bool + record func(n int64) + body io.ReadCloser + read atomic.Int64 +} + +var _ io.ReadWriteCloser = &wrappedBody{} + +func (wb *wrappedBody) Write(p []byte) (int, error) { + // This will not panic given the guard in newWrappedBody. + n, err := wb.body.(io.Writer).Write(p) + if err != nil { + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Read(b []byte) (int, error) { + n, err := wb.body.Read(b) + // Record the number of bytes read + wb.read.Add(int64(n)) + + switch err { + case nil: + // nothing to do here but fall through to the return + case io.EOF: + wb.recordBytesRead() + wb.span.End() + default: + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once. +func (wb *wrappedBody) recordBytesRead() { + // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that + // two goroutines are racing to call this method, the number of bytes read will no longer increase. 
Using + // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish + // calling wb.record(wb.read.Load()). + if wb.recorded.CompareAndSwap(false, true) { + // Record the total number of bytes read + wb.record(wb.read.Load()) + } +} + +func (wb *wrappedBody) Close() error { + wb.recordBytesRead() + wb.span.End() + if wb.body != nil { + return wb.body.Close() + } + return nil +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go new file mode 100644 index 000000000..1133961d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// Version is the current release version of the otelhttp instrumentation. +func Version() string { + return "0.55.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore new file mode 100644 index 000000000..6bf3abc41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -0,0 +1,9 @@ +ot +fo +te +collison +consequentially +ans +nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc new file mode 100644 index 000000000..e2cb3ea94 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -0,0 +1,10 @@ +# https://github.com/codespell-project/codespell +[codespell] +builtin = clear,rare,informal +check-filenames = +check-hidden = +ignore-words = .codespellignore +interactive = 1 +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools +uri-ignore-words-list = * +write = diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 000000000..314766e91 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 000000000..895c7664b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,22 @@ +.DS_Store +Thumbs.db + +.tools/ +venv/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* +go.work +go.work.sum + +gen/ + +/example/dice/dice +/example/namedtracer/namedtracer +/example/otel-collector/otel-collector +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 000000000..a5f904197 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,304 @@ +# See https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades to not include new "default + # enabled" linters. 
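Putting the transport.go code above to use from the application side: the usual pattern is to wrap an http.Client's transport with NewTransport so every outbound request gets a client span, propagated trace headers, and size/latency metrics. A minimal sketch (the target URL is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Wrap the default transport; RoundTrip starts a span, injects the
	// propagation headers, and records request/response byte counts and
	// request duration.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```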
+ disable-all: true + # Specifically enable linters we want to use. + enable: + - asasalint + - bodyclose + - depguard + - errcheck + - errorlint + - godot + - gofumpt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - tenv + - typecheck + - unconvert + - unused + - unparam + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. + - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + rules: + non-tests: + files: + - "!$test" + - "!**/*test/*.go" + - "!**/internal/matchers/*.go" + deny: + - pkg: "testing" + - pkg: "github.com/stretchr/testify" + - pkg: "crypto/md5" + - pkg: "crypto/sha1" + - pkg: "crypto/**/pkix" + otlp-internal: + files: + - "!**/exporters/otlp/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. 
+ otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..fb107426e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,3182 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + + + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. 
(#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. 
(#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. 
(#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. 
(#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced. 
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter prints log records to STDOUT. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users if a log record will be emitted or not. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. + This package is provided with the anticipation that all functionality will be migrate to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At which point, users will be required to migrage their code, and this package will be deprecated then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. 
(#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + +## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 + +This release is the last to support [Go 1.20]. +The next release will require at least [Go 1.21]. + +### Added + +- Support [Go 1.22]. (#4890) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900) +- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900) +- The `go.opentelemetry.io/otel/log` module is added. + This module includes OpenTelemetry Go's implementation of the Logs Bridge API. + This module is in an alpha state, it is subject to breaking changes. + See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- Add ARM64 platform to the compatibility testing suite. (#4994) + +### Fixed + +- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945) +- Fix negative buckets in output of exponential histograms. (#4956) + +## [1.23.1] 2024-02-07 + +### Fixed + +- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888) + +## [1.23.0] 2024-02-06 + +This release contains the first stable, `v1`, release of the following modules: + +- `go.opentelemetry.io/otel/bridge/opencensus` +- `go.opentelemetry.io/otel/bridge/opencensus/test` +- `go.opentelemetry.io/otel/example/opencensus` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` +- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` + +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808) +- Experimental exemplar exporting is added to the metric SDK. + See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. 
(#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+ Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+ It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow the `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
+ If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671) +- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) +- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721) +- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743) +- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774) +- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775) +- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818) +- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804) + +### Fixed + +- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755) +- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756) +- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772) +- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776) +- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that requires escaping during propagation. (#4804) +- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742) + +## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. 
(#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+ This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+ This extends the `Tracer` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+ This extends the `Span` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`.
(#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` no longer prettifies its output by default. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`.
(#4535) + +### Fixed + +- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +### Removed + +- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) + +## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 + +This is a release candidate for the v1.19.0/v0.42.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) + +### Fixed + +- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +## [1.18.0/0.41.0/0.0.6] 2023-09-12 + +This release drops the compatibility guarantee of [Go 1.19]. + +### Added + +- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) +- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) + +### Changed + +- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). + The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) + +### Removed + +- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) +- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) +- Dropped guaranteed support for versions of Go less than 1.20. (#4481) + +## [1.17.0/0.40.0/0.0.5] 2023-08-28 + +### Added + +- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Add support for exponential histogram aggregations. + A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) +- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) +- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) +- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) +- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. + The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. 
(#4362)
+- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`.
(#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
+ Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
+ Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation`, the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+ OpenTelemetry dropped support for Jaeger exporter in July 2023. + Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` + or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) +- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. + Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) + +## [1.16.0/0.39.0] 2023-05-18 + +This release contains the first stable release of the OpenTelemetry Go [metric API]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. + The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) +- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. + The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) +- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) +- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) +- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) + +### Changed + +- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) +- `MeterProvider` returns noop meters once it has been shutdown. (#4154) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. + Use `go.opentelemetry.io/otel/metric` instead. (#4055) + +### Fixed + +- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) + +## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 + +This is a release candidate for the v1.16.0/v0.39.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. 
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProivder` for a global `metric.MeterProvider`. + - Use `SetMeterProivder` to set the global `metric.MeterProvider`. + +### Changed + +- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. + This stages the metric API to be released as a stable module. (#4038) + +### Removed + +- The `go.opentelemetry.io/otel/metric/global` package is removed. + Use `go.opentelemetry.io/otel` instead. (#4039) + +## [1.15.1/0.38.1] 2023-05-02 + +### Fixed + +- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041) + +## [1.15.0/0.38.0] 2023-04-27 + +### Added + +- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) +- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) +- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) +- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) + - The `AddConfig` used to hold configuration for addition measurements + - `NewAddConfig` used to create a new `AddConfig` + - `AddOption` used to configure an `AddConfig` + - The `RecordConfig` used to hold configuration for recorded measurements + - `NewRecordConfig` used to create a new `RecordConfig` + - `RecordOption` used to configure a `RecordConfig` + - The `ObserveConfig` used to hold configuration for observed measurements + - `NewObserveConfig` used to create a new `ObserveConfig` + - `ObserveOption` used to configure an `ObserveConfig` +- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. + They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) + +### Changed + +- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) +- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. + This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) +- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) + - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` +- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) +- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. 
(#3974) +- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Int64Counter.Add` method now accepts `...AddOption` + - The `Float64Counter.Add` method now accepts `...AddOption` + - The `Int64UpDownCounter.Add` method now accepts `...AddOption` + - The `Float64UpDownCounter.Add` method now accepts `...AddOption` + - The `Int64Histogram.Record` method now accepts `...RecordOption` + - The `Float64Histogram.Record` method now accepts `...RecordOption` + - The `Int64Observer.Observe` method now accepts `...ObserveOption` + - The `Float64Observer.Observe` method now accepts `...ObserveOption` +- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Observer.ObserveInt64` method now accepts `...ObserveOption` + - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` +- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) + +### Fixed + +- `TracerProvider` allows calling `Tracer()` while it's shutting down. + It used to deadlock. (#3924) +- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) +- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) +- Automatically figure out the default aggregation with `aggregation.Default`. (#3967) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. + Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) + +## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) +- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to sets all timestamps to zero. (#3828) +- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) +- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) +- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) +- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) + +### Changed + +- Optimize memory allocation when creation a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) +- Optimize memory allocation when creation new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) +- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) +- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. 
(#3844) +- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) +- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) +- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) +- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) + +### Fixed + +- `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. (#3845) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) +- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) +- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `float64` instrument configuration instead. (#3895) +- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `int64` instrument configuration instead. (#3895) +- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) + +## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +This release drops the compatibility guarantee of [Go 1.18]. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProivder` for a global `metric.MeterProvider`. + - Use `SetMeterProivder` to set the global `metric.MeterProvider`. + +### Changed + +- Dropped compatibility testing for [Go 1.18]. + The project no longer guarantees support for this version of Go. (#3813) + +### Fixed + +- Handle empty environment variable as it they were not set. (#3764) +- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) +- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899) +- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/global` package is deprecated. + Use `go.opentelemetry.io/otel` instead. (#3818) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) + +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. 
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. + +### Changed + +- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. (#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. 
(#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. 
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. + These instruments are use as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. 
(#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. 
(#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. + This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. 
(#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Re-enabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. 
(#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. (#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. 
(#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shut down even if one reports an error. (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. (#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) +- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead.
(#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. + The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. (#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. 
(#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. + Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported. (#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values would be used. +- Rename the `gc` runtime name to `go` (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. 
+ The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601) +- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. (#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits; this option does not. + This option will be kept until the next major version incremented release. (#2637) + +## [1.4.1] - 2022-02-16 + +### Fixed + +- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) + +## [1.4.0] - 2022-02-11 + +### Added + +- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) +- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. + To enable, use a logger with Verbosity (V level) `>=1`. (#2500) +- Added support to configure the batch span-processor with environment variables. + The following environment variables are used. (#2515) + - `OTEL_BSP_SCHEDULE_DELAY` + - `OTEL_BSP_EXPORT_TIMEOUT` + - `OTEL_BSP_MAX_QUEUE_SIZE` + - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` + +### Changed + +- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) + +### Deprecated + +- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) +- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) + +### Fixed + +- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) +- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512) +- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) +- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) +- W3C baggage will now decode urlescaped values. (#2529) +- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself.
(#2522) +- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. + Instead of dropping the least-recently-used attribute, the last added attribute is dropped. + This drop order still only applies to attributes with unique keys not already contained in the span. + If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) + +### Removed + +- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) + - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) + - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) + - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) + +## [1.3.0] - 2021-12-10 + +### ⚠️ Notice ⚠️ + +We have updated the project minimum supported Go version to 1.16 + +### Added + +- Added an internal Logger. + This can be used by the SDK and API to provide users with feedback of the internal state. + To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343) +- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) +- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) + +### Changed + +- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) +- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) +- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) + +### Fixed + +- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. + Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP__ENDPOINT` environment variable is now used without modification of the path. + When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. 
(#2433) +- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) +- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) + +### Deprecated + +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) + +### Removed + +- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) +- Remove the metric Bound Instruments interface and implementations. (#2399) +- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) +- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) + +## [1.2.0] - 2021-11-12 + +### Changed + +- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) +- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) +- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: + - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` + - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. + - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) +- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) +- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) + +## [1.1.0] - 2021-10-27 + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. + The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) +- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. + The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) +- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. 
+ The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322) + - When upgrading from the `semconv/v1.4.0` package note the following name changes: + - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` + - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` + - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` + - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` + - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` + - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` + +### Changed + +- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). + +### Fixed + +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) +- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) + +## [1.0.1] - 2021-10-01 + +### Fixed + +- json stdout exporter no longer crashes due to concurrency bug. (#2265) + +## [Metrics 0.24.0] - 2021-10-01 + +### Changed + +- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) +- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) + - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. + - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. + +## [1.0.0] - 2021-09-20 + +This is the first stable release for the project. +This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). + +### Added + +- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) + +### Fixed + +- Slice-valued attributes can correctly be used as map keys. (#2223) + +### Removed + +- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) +- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) +- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) +- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. + Use the typed functions and methods added to the package instead. (#2235) + - The `Key.Array` method is removed. + - The `Array` function is removed. + - The `Any` function is removed. + - The `ArrayValue` function is removed. + - The `AsArray` function is removed. + +## [1.0.0-RC3] - 2021-09-02 + +### Added + +- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) +- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. 
(#2163) +- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) + - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. +- Added the `go.opentelemetry.io/otel/example/fib` example package. + Included is an example application that computes Fibonacci numbers. (#2203) + +### Changed + +- Metric instruments have been renamed to match the (feature-frozen) metric API specification: + - ValueRecorder becomes Histogram + - ValueObserver becomes Gauge + - SumObserver becomes CounterObserver + - UpDownSumObserver becomes UpDownCounterObserver + The API exported from this project is still considered experimental. (#2202) +- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) +- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) +- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) +- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) + +### Deprecated + +- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. + All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. + The functions from that package should be used instead. (#2166) +- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. + Use the typed `*Slice` functions and types added to the package instead. (#2162) +- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. + Use the typed functions instead. (#2181) +- The `go.opentelemetry.io/otel/oteltest` package is deprecated. + The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) + +### Removed + +- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) + +### Fixed + +- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) +- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) +- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) +- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. 
(#2120) +- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) +- Fixed typos in resources.go. (#2201) + +## [1.0.0-RC2] - 2021-07-26 + +### Added + +- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) +- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) +- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) +- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115) +- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. + This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. + For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) +- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) + +### Changed + +- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) +- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) + +### Deprecated + +- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) +- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123) +- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. + Use the `trace.ParseTraceState` function instead. (#2122) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) +- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. + The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097) +- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) +- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) + +### Fixed + +- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) +- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) +- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD.
(#2092) +- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. + This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) +- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) +- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) + +## [Experimental Metrics v0.22.0] - 2021-07-19 + +### Added + +- Adds HTTP support for OTLP metrics exporter. (#2022) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) + +## [1.0.0-RC1] / 0.21.0 - 2021-06-18 + +With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` +while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules +with major version 0. + +### Added + +- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832) + - The following status codes are defined as transient errors: + | gRPC Status Code | Description | + | ---------------- | ----------- | + | 1 | Cancelled | + | 4 | Deadline Exceeded | + | 8 | Resource Exhausted | + | 10 | Aborted | + | 11 | Out of Range | + | 14 | Unavailable | + | 15 | Data Loss | +- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) +- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) +- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) +- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) +- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) +- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. + It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) +- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. + This method returns the number of list-members the `TraceState` holds. (#1937) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. + Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install a `otlptrace.Exporter` in tracing. (#1922) +- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967) +- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. + These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type.
(#1967) +- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) +- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) +- Several builtin resource detectors now correctly populate the schema URL. (#1938) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005) +- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009) + +### Changed + +- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. + `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) +- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) +- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) +- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) +- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) +- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) +- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) +- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. + This method returns the status of a span using the new `Status` type. (#1874) +- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. + This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) +- Unembed `SpanContext` in `Link`. (#1877) +- Generate Semantic conventions from the specification YAML. (#1891) +- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) +- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. 
(#1902) +- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) +- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921) +- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) +- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Refactored option types according to the contribution style guide. (#1882) +- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. + This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. + The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) +- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) +- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) +- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931) +- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) +- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) + +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. 
(#1993) + +### Removed + +- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) +- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to include them individually. (#1810) +- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. + The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. + The `IsRecording` method returns if the span is recording or not. + A read-only span value does not need to know if updates to it will be recorded or not. + By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) +- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. + The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. + When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) +- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. + Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. + The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900) + - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) +- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) +- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) +- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. + Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) +- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. + These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) +- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) +- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) + +### Fixed + +- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) +- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) +- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) +- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`.
(#1898) +- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) +- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) +- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) + +### Security + +## [0.20.0] - 2021-04-23 + +### Added + +- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) +- Adds semantic conventions for exceptions. (#1492) +- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` + These environment variables can be used to override Jaeger agent hostname and port (#1752) +- Option `ExportTimeout` was added to batch span processor. (#1755) +- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) +- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) +- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) +- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) +- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) +- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) +- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) + - `process.pid` + - `process.executable.name` + - `process.executable.path` + - `process.command_args` + - `process.owner` + - `process.runtime.name` + - `process.runtime.version` + - `process.runtime.description` +- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) +- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811) + - `OTEL_EXPORTER_OTLP_ENDPOINT` + - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` + - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` + - `OTEL_EXPORTER_OTLP_HEADERS` + - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` + - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` + - `OTEL_EXPORTER_OTLP_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` + - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TIMEOUT` + - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` + - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` + - `OTEL_EXPORTER_OTLP_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` +- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) +- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) + +### Fixed + +- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) +- The Jaeger exporter now correctly sets tags for the Span status code and message. 
+ This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) +- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. + Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) +- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) +- Fixed typo for default service name in Jaeger Exporter. (#1797) +- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814) +- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. + Instead, the exporter now splits the batch into smaller sendable batches. (#1828) + +### Changed + +- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) +- Jaeger exporter was updated to use thrift v0.14.1. (#1712) +- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) +- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) +- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. + The Span's SpanContext can now self-identify as being remote or not. + This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) +- Improve OTLP/gRPC exporter connection errors. (#1737) +- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. + The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) +- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. + This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) +- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` + to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) +- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) +- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. + It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) +- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) +- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) +- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) +- The prometheus.InstallNewPipeline example is moved from comment to example test. (#1796) +- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter.
(#1800) +- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. + This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) +- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) +- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. + The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) +- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830) + +### Removed + +- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` + These environment variables will no longer be used to override values of the Jaeger exporter (#1752) +- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. + This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. + To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) +- Setting error status while recording error with Span from oteltest package. (#1729) +- The concept of a remote and local Span stored in a context is unified to just the current Span. + Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. + If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) +- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. + This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) +- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. + These functions for retrieving specific environment variable values are redundant of other internal functions and + are not intended for end user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. 
+ If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. (#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch below). +- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656) +- Added non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) +- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) +- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a SpanProcessor interface from their respective constructors. (#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed `jaeger.WithProcess` configuration option. (#1673) +- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
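The Changed list above notes that `trace.SpanContext` became immutable and is now constructed through `trace.SpanContextConfig`, and the Added list introduces the `IsRemote()` predicate. A minimal sketch of how those pieces fit together, assuming the post-rename `go.opentelemetry.io/otel/trace` package at or after this release; the hex IDs are illustrative placeholders, not values from this project:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Illustrative, hard-coded IDs; real values normally arrive from the wire.
	traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")

	// SpanContext has no exported fields, so every known value is supplied
	// up front through SpanContextConfig.
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    traceID,
		SpanID:     spanID,
		TraceFlags: trace.FlagsSampled,
		Remote:     true, // the `remote` property added in this release
	})

	// Store it as the remote parent and read it back from the context.
	ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)
	got := trace.SpanContextFromContext(ctx)
	fmt.Println(got.IsValid(), got.IsRemote(), got.IsSampled())
}
```

Any field left out of the config simply reports its zero value from the returned `SpanContext`.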
+ +### Fixed + +- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in global trace delegate implementation. (#1686) +- Reduced excess memory usage by global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) +- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. (#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | MacOS | 1.15 | amd64 | + | MacOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced interface `oteltest.SpanRecorder` with its existing implementation + `StandardSpanRecorder`. (#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569) +- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) +- Prevent end-users from implementing some interfaces (#1575) + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572) +- Windows build of Jaeger tests now compiles with OS specific functions (#1576).
(#1577) +- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename project default branch from `master` to `main`. (#1505) +- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +### Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) +- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added codeql workflow to GitHub Actions (#1428) +- Added Gosec workflow to GitHub Actions (#1429) +- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. (#1360) +- Migrated CI/CD from CircleCI to GitHub Actions (#1382) +- Remove duplicate checkout from GitHub Actions workflow (#1407) +- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) +- Metric `exact` aggregator includes per-point timestamps (#1412) +- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify endpoint API related to the OTel exporter. (#1401) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430) +- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) +- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext` (#1432) +- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`.
(#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) +- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) + +## [0.15.0] - 2020-12-10 + +### Added + +- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) + +### Changed + +- The Zipkin exporter now uses the Span status code to determine. (#1328) +- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) +- Move the OpenCensus example into `example` directory. (#1359) +- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) +- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) +- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) + +### Fixed + +- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) + +## [0.14.0] - 2020-11-19 + +### Added + +- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) +- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) +- `SpanContextFromContext` returns `SpanContext` from context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340) +- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323) +- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) +- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) +- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) + +### Changed + +- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) + - `ID` has been renamed to `TraceID`. + - `IDFromHex` has been renamed to `TraceIDFromHex`. + - `EmptySpanContext` is removed. +- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) +- OTLP Exporter updates: + - supports OTLP v0.6.0 (#1230, #1354) + - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) +- The Sampler is now called on local child spans. (#1233) +- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. 
(#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000. (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) + +### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`.
(#1299) +- Fix global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) + +### Changed + +- Set default propagator to no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They now are `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes. (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) +- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. 
(#1167) + +### Changed + +- Add reconnecting udp connection type to Jaeger exporter. + This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. + This is be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move `tools` package under `internal`. (#1141) +- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) +- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use LastValue aggregator by default. (#1165) +- OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. 
(#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the + `go.opentelemetry.io/contrib/propagators/` module. (#1191) +- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) + +### Fixed + +- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix missing shutdown processor in otel-collector example. (#1186) +- Fix missing shutdown processor in basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`.
(#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. + This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify Callback Function Naming. + Rename `*Callback` with `*Func`. (#1061) +- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. + This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) + +### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct instrumentation version tag in Jaeger exporter. 
(#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006) + +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. + +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944) +- Add propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`. + - `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. +- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1.
(#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. + References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic convention for `faas.coldstart` and `container.id`. (#909) +- Add http content size semantic conventions. (#905) +- Include `http.request_content_length` in HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) + +### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. + Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. 
(#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follow the B3 specification and omits the `X-B3-Sampling` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) +- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update otel-collector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) +- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844) +- Timestamps are now passed to exporters for each export. 
(#835) +- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior `Record` `struct` use for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) +- Environment variables for Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`. + There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) +- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769) + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename `simple` integrator to `basic` integrator. (#857) +- Merge otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. + All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808) +- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function. (#791) +- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Make the argument order to Histogram and DDSketch `New()` consistent.
(#781) + +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) +- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) +- Set span status from HTTP status code in the othttp instrumentation. (#832) +- Fixed typo in push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed `global` `handler_test.go` test failure. #804 +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) +- Fixed OTLP example's accidental early close of exporter. (#807) +- Ensure zipkin exporter reads and closes response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) +- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`. 
(#758) +- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) +- Support use of synchronous instruments in asynchronous callbacks (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) +- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) +- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) +- Fix `string` case in `kv` `Infer` function. (#746) +- Fix panic in grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite span batch process queue batching logic. (#719) +- Remove the push controller named Meter map. (#738) +- Fix Histogram aggregator initial state (fix #735). (#736) +- Ensure golang alpine image is running `golang-1.14` for examples. (#733) +- Added test for grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to root package of project. (#696) +- Create basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- Jaeger exporter option that allows user to specify custom http client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) +- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to `api/metric` package. (#706) +- Move `SpanContext` from `core` to `trace` package. (#692) +- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove internal `StateLocker` implementation. (#688) +- Increase instance size CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. 
(#680) +- Reimplement histogram using mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update documentation to not include any references to `WithKeys`. (#672) +- Correct misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- New `api/label` package, providing common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) +- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) +- An `Equal` method to the `Resource` test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed values is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter. (#631) +- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) +- Ensure spans created by httptrace client tracer reflect operation structure. (#618) +- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) +- Fix time conversion from internal to OTLP in OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. 
(#604) + +## [0.4.0] - 2020-03-30 + +### Added + +- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- Script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected missing return in mock span. (#582) +- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update pre-release script to be compatible between GNU and BSD based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved span transforms of the OTLP exporter to the internal package. (#593) +- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) +- Removed unneeded allocation on empty labels in OLTP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) +- Update project documentation godoc.org links to pkg.go.dev. (#602) + +## [0.3.0] - 2020-03-21 + +This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. +There is still a possibility of breaking changes. + +### Added + +- Add `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) +- The `Resource` type was added to the SDK. (#528) +- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) +- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. + Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) +- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) +- Scripts to better automate the release process. (#576) + +### Changed + +- Default to to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) +- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) +- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) +- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) +- Updated the trace `Sampler` interface conform to the OpenTelemetry specification. 
(#531) +- Rename metric API `Options` to `Config`. (#541) +- Rename metric `Counter` aggregator to be `Sum`. (#541) +- Unify metric options into `Option` from instrument specific options. (#541) +- The trace API's `TraceProvider` now support `Resource`s. (#545) +- Correct error in zipkin module name. (#548) +- The jaeger trace exporter now supports `Resource`s. (#551) +- Metric SDK now supports `Resource`s. + The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) +- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) +- The stdout trace exporter now supports `Resource`s. (#558) +- The metric `Descriptor` is now included at the API instead of the SDK. (#560) +- Replace `Ordered` with an iterator in `export.Labels`. (#567) + +### Removed + +- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) +- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) +- `GetDescriptor` from the metric SDK. (#575) +- The `Gauge` instrument from the metric API. (#537) + +### Fixed + +- Make histogram aggregator checkpoint consistent. (#438) +- Update README with import instructions and how to build and test. (#505) +- The default label encoding was updated to be unique. (#508) +- Use `NewRoot` in the othttp plugin for public endpoints. (#513) +- Fix data race in `BatchedSpanProcessor`. (#518) +- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 +- Use a variable-size array to represent ordered labels in maps. (#523) +- Update the OTLP protobuf and update changed import path. (#532) +- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) +- Eliminate goroutine leak in histogram stress test. (#547) +- Update OTLP exporter with latest protobuf. (#550) +- Add filters to the othttp plugin. (#556) +- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) +- Encode labels once during checkpoint. + The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. + This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) +- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) + +## [0.2.3] - 2020-03-04 + +### Added + +- `RecordError` method on `Span`s in the trace API to Simplify adding error events to spans. (#473) +- Configurable push frequency for exporters setup pipeline. (#504) + +### Changed + +- Rename the `exporter` directory to `exporters`. + The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. + This resulted in all subsequent releases not becoming the default latest. + A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. + Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. + Consequentially, this action also renames *all* exporter packages. (#502) + +### Removed + +- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. 
(#503) + +## [0.2.2] - 2020-02-27 + +### Added + +- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) +- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) +- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) +- `Config` and configuring `Option` to the propagator API. (#467) +- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) +- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) +- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) +- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) +- Histogram aggregator. (#433) +- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) +- `AlwaysParentSample` sampler to the trace API. (#455) +- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) + +### Changed + +- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) +- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) +- Move correlation context propagation to correlation package. (#479) +- Do not default to putting remote span context into links. (#480) +- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) +- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) +- Renamed the `export` package to `metric` to match directory structure. (#432) +- Rename the `api/distributedcontext` package to `api/correlation`. (#444) +- Rename the `api/propagators` package to `api/propagation`. (#444) +- Move the propagators from the `propagators` package into the `trace` API package. (#444) +- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) +- Moved all dependencies of tools package to a tools directory. (#466) + +### Removed + +- Binary propagators. (#467) +- NOOP propagator. (#467) + +### Fixed + +- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) +- Fix a possible nil-dereference crash (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter. 
(#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. + This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- A testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule. 
(#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into a empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. (#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this is environment variable is not needed for that version of Go. (#359) +- Run the race checker for all test. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. (#265) +- Array aggregation for raw measure metrics. (#282) +- The core.Value now have a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead. 
(#318) +- `trace.WithAttributes` to append values instead of replacing (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of open-telemetry go library. +It contains api and sdk for trace and meter. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project. + +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...HEAD +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 +[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 +[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 +[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 +[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 +[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + + +[Go 1.23]: https://go.dev/doc/go1.23 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 000000000..5904bb707 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 000000000..915807253 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,661 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets 
regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +Additionally, there is a `codespell` target that checks for common +typos in the code. It is not run by default, but you can run it +manually with `make codespell`. It will set up a virtual environment +in `venv` and install `codespell` there. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean` then it means that everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files", just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in +current working directory. + +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add git@github.com:/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b +# edit files +# update changelog +make precommit +git add -p +git commit +git push +``` + +Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure CLA is signed and CI is clear. + +### How to Get PRs Merged + +A PR is considered **ready to merge** when: + +* It has received two qualified approvals[^1]. + + This is not enforced through automation, but needs to be validated by the + maintainer merging. 
+ * The qualified approvals need to be from [Approver]s/[Maintainer]s + affiliated with different companies. Two qualified approvals from + [Approver]s or [Maintainer]s affiliated with the same company counts as a + single qualified approval. + * PRs introducing changes that have already been discussed and consensus + reached only need one qualified approval. The discussion and resolution + needs to be linked to the PR. + * Trivial changes[^2] only need one qualified approval. + +* All feedback has been addressed. + * All PR comments and suggestions are resolved. + * All GitHub Pull Request reviews with a status of "Request changes" have + been addressed. Another review by the objecting reviewer with a different + status can be submitted to clear the original review, or the review can be + dismissed by a [Maintainer] when the issues from the original review have + been addressed. + * Any comments or reviews that cannot be resolved between the PR author and + reviewers can be submitted to the community [Approver]s and [Maintainer]s + during the weekly SIG meeting. If consensus is reached among the + [Approver]s and [Maintainer]s during the SIG meeting the objections to the + PR may be dismissed or resolved or the PR closed by a [Maintainer]. + * Any substantive changes to the PR require existing Approval reviews be + cleared unless the approver explicitly states that their approval persists + across changes. This includes changes resulting from other feedback. + [Approver]s and [Maintainer]s can help in clearing reviews and they should + be consulted if there are any questions. + +* The PR branch is up to date with the base branch it is merging into. + * To ensure this does not block the PR, it should be configured to allow + maintainers to update it. + +* It has been open for review for at least one working day. This gives people + reasonable time to review. + * Trivial changes[^2] do not have to wait for one day and may be merged with + a single [Maintainer]'s approval. + +* All required GitHub workflows have succeeded. +* Urgent fix can take exception as long as it has been actively communicated + among [Maintainer]s. + +Any [Maintainer] can merge the PR once the above criteria have been met. + +[^1]: A qualified approval is a GitHub Pull Request review with "Approve" + status from an OpenTelemetry Go [Approver] or [Maintainer]. +[^2]: Trivial changes include: typo corrections, cosmetic non-substantive + changes, documentation corrections or updates, dependency updates, etc. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). + +It's especially valuable to read through the [library +guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the method to satisfy those uses cases are +not. + +As such, Contributions should provide functionality and behavior that +conforms to the specification, but the interface and structure is +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). 
+ +## Documentation + +Each (non-internal, non-test) package must be documented using +[Go Doc Comments](https://go.dev/doc/comment), +preferably in a `doc.go` file. + +Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) +instead of putting code snippets in Go doc comments. +In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). + +You can install and run a "local Go Doc site" in the following way: + + ```sh + go install golang.org/x/pkgsite/cmd/pkgsite@latest + pkgsite + ``` + +[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) +is an example of a very well-documented package. + +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project the `make precommit` +will format, lint, validate, and in some cases fix the changes you plan to +submit. This check will need to pass for your changes to be able to be +merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow variable number of options to be applied. However, the strong +type system of Go restricts the function design options. There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with +specific type name this Configuration applies to if there are multiple +`config` in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general the `config` type will not need to be used externally to the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please, include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config` are not shared across package boundaries. +Meaning a `config` from one package should not be directly used by another. The +one exception is the API packages. The configs from the base API, eg. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK therefore it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility, to achieve this no fields should be exported but should +instead be accessed by methods. 
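+For instance, a minimal sketch of this accessor-method style (the `Config`
+and `Name` identifiers below are illustrative only, not taken from any
+package in this repository):
+
+```go
+// Config holds the options a component was configured with. Its fields
+// stay unexported so they can change without breaking users; callers
+// read them through accessor methods instead.
+type Config struct {
+	name string
+}
+
+// Name returns the configured name.
+func (c Config) Name() string { return c.name }
+```
+
+This mirrors how the exported API configs referenced above expose their
+values through methods rather than exported fields.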
+ +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any defaults setting and looping over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here. + return config +} +``` + +If validation of the `config` options is also performed this can return an +error as well that is expected to be handled by the instantiation function +or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on its own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or in the +special case of a boolean options `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have Bool option excluded. +func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets T to have include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as MyType. +func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Using this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct` that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface. 
+ +For example. + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption apply Dog specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption apply Bird specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option apply options for all animals. +type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documenting by naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +These interfaces are defined by the OpenTelemetry specification and will be +updated as the specification evolves. + +Otherwise, stable interfaces MUST NOT be modified. + +#### How to Change Specification Interfaces + +When an API change must be made, we will update the SDK with the new method one +release before the API change. This will allow the SDK one version before the +API change to work seamlessly with the new API. + +If an incompatible version of the SDK is used with the new API the application +will fail to compile. + +#### How Not to Change Specification Interfaces + +We have explored using a v2 of the API to change interfaces and found that there +was no way to introduce a v2 and have it work seamlessly with the v1 of the API. +Problems happened with libraries that upgraded to v2 when an application did not, +and would not produce any telemetry. + +More detail of the approaches considered and their limitations can be found in +the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) +issue. + +#### How to Change Other Interfaces + +If new functionality is needed for an interface that cannot be changed it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface. 
For example, if you wanted to a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. E.g. + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter struct { + Exporter + Close() +} +``` + +This new type can be used similar to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need their own super-set +interfaces and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred. + +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + +### Testing + +The tests should never leak goroutines. + +Use the term `ConcurrentSafe` in the test name when it aims to verify the +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. + +### Internal packages + +The use of internal packages should be scoped to a single module. A sub-module +should never import from a parent internal package. This creates a coupling +between the two modules where a user can upgrade the parent without the child +and if the internal package API has changed it will fail to upgrade[^3]. + +There are two known exceptions to this rule: + +- `go.opentelemetry.io/otel/internal/global` + - This package manages global state for all of opentelemetry-go. It needs to + be a single package in order to ensure the uniqueness of the global state. +- `go.opentelemetry.io/otel/internal/baggage` + - This package provides values in a `context.Context` that need to be + recognized by `go.opentelemetry.io/otel/baggage` and + `go.opentelemetry.io/otel/bridge/opentracing` but remain private. + +If you have duplicate code in multiple modules, make that code into a Go +template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] +to render the templates in the desired locations. See [#4404] for an example of +this. + +[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 + +### Ignoring context cancellation + +OpenTelemetry API implementations need to ignore the cancellation of the context that are +passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). +Recording methods should not return an error describing the cancellation state of the context +when they complete, nor should they abort any work. + +This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for +the method. 
In that case the context cancellation can be used for the timeout with the +restriction that this behavior is documented for the method. Otherwise, timeouts +are expected to be handled by the user calling the API, not the implementation. + +Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method +of a provider. It is assumed the context passed from a user is not used for this purpose. + +Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, +force flushing telemetry, shutting down a signal provider) the context cancellation +should be honored. This means all work done on behalf of the user provided context +should be canceled. + +## Approvers and Maintainers + +### Approvers + +- [Chester Cheung](https://github.com/hanyuancheung), Tencent + +### Maintainers + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..b04695b24 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,300 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. +$(PYTOOLS)/%: $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... 
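# Illustrative sketch, not part of the upstream Makefile: additional tools
# reuse the pattern rule above ($(TOOLS)/%: ... && $(GO) build -o $@ $(PACKAGE))
# by declaring a path variable and binding PACKAGE for that path, e.g.
#
#   GOFUMPT = $(TOOLS)/gofumpt
#   $(TOOLS)/gofumpt: PACKAGE=mvdan.cc/gofumpt
#
# assuming the tool's module is also required by $(TOOLS_MOD_DIR)/go.mod and
# the new variable is appended to the `tools` aggregate target below.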
+ +.PHONY: vanity-import-fix +vanity-import-fix: $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: benchmark +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) +benchmark/%: + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ + && cd $* \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.21 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... 
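# Usage sketch (editorial illustration, not from the upstream Makefile): the
# aggregate targets above fan out to one invocation per Go module directory,
# so a typical local workflow looks like
#
#   make precommit          # default goal: generate, lint, tidy, verify, run default tests
#   make test-race          # `go test -race` across all modules
#   make test-coverage      # merges per-module coverage profiles into coverage.txt
#
# Docker is only needed for the Python-based and markdown-lint targets; the
# Go-based targets require just the Go toolchain plus the tools built above.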
+ +.PHONY: codespell +codespell: $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + +.PHONY: prerelease +prerelease: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 000000000..9a6570703 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,111 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. 
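As a taste of those APIs, here is a minimal, illustrative sketch (the package layout, tracer name, and attribute key are placeholders, not taken from this repository) of recording a span around a unit of work:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// handleRequest records a span around a single unit of work.
func handleRequest(ctx context.Context, userID string) {
	// Obtain a named Tracer from the globally registered TracerProvider.
	tracer := otel.Tracer("example/request")

	// Start a span and make sure it is ended when the work completes.
	_, span := tracer.Start(ctx, "handleRequest")
	defer span.End()

	// Attach attributes describing the work being measured.
	span.SetAttributes(attribute.String("user.id", userID))
}

func main() {
	handleRequest(context.Background(), "42")
}
```

Until a TracerProvider from the SDK is registered, spans created this way record nothing, so instrumentation like this is safe to ship in libraries.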
+ +## Project Status + +| Signal | Status | +|---------|--------------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Beta[^1] | + +Progress and status specific to this repository is tracked in our +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](VERSIONING.md). + +[^1]: https://github.com/orgs/open-telemetry/projects/43 + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). + +If you need to extend the telemetry an instrumentation library provides or want +to build your own instrumentation for your application directly you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The included [examples](./example/) are a good way to see some +practical uses of this process. + +### Export + +Now that your application is instrumented to collect telemetry, it needs an +export pipeline to send that telemetry to an observability platform. 
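A minimal sketch of that wiring, assuming the stdout trace exporter and the trace SDK modules (chosen here purely for illustration; the other exporters follow broadly the same shape):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Create an exporter that writes spans to stdout; in production this
	// would more commonly be the OTLP exporter pointed at a collector.
	exporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}

	// Build a TracerProvider that batches spans to the exporter and
	// register it globally so instrumentation picks it up.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
	defer func() {
		if err := tp.Shutdown(ctx); err != nil {
			log.Print(err)
		}
	}()
	otel.SetTracerProvider(tp)

	// Any spans created via otel.Tracer(...) are now exported.
	_, span := otel.Tracer("example").Start(ctx, "startup")
	span.End()
}
```

Swapping in a different exporter generally only changes the constructor call; the TracerProvider wiring stays the same.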
+ +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | + +## Contributing + +See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md new file mode 100644 index 000000000..59992984d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -0,0 +1,146 @@ +# Release Process + +## Semantic Convention Generation + +New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. + +1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. + +For example, + +```sh +export TAG="v1.21.0" # Change to the release version you are generating. +export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +``` + +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. + +## Breaking changes validation + +You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. + +You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). + +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + +## Pre-Release + +First, decide which module sets will be released and update their versions +in `versions.yaml`. Commit this change to a new branch. + +Update go.mod for submodules to depend on the new release which will happen in the next step. + +1. Run the `prerelease` make target. It creates a branch + `prerelease__` that will contain all release changes. + + ``` + make prerelease MODSET= + ``` + +2. Verify the changes. + + ``` + git diff ...prerelease__ + ``` + + This should have changed the version for all modules to be ``. + If these changes look correct, merge them into your pre-release branch: + + ```go + git merge prerelease__ + ``` + +3. Update the [Changelog](./CHANGELOG.md). + - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + To verify this, you can look directly at the commits since the ``. + + ``` + git --no-pager log --pretty=oneline "..HEAD" + ``` + + - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). 
+ - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. + - Update all the appropriate links at the bottom. + +4. Push the changes to upstream and create a Pull Request on GitHub. + Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. + +## Tag + +Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. + +***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! +Failure to do so will leave things in a broken state. As long as you do not +change `versions.yaml` between pre-release and this step, things should be fine. + +***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). +It is critical you make sure the version you push upstream is correct. +[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). + +1. For each module set that will be released, run the `add-tags` make target + using the `` of the commit on the main branch for the merged Pull Request. + + ``` + make add-tags MODSET= COMMIT= + ``` + + It should only be necessary to provide an explicit `COMMIT` value if the + current `HEAD` of your working directory is not the correct commit. + +2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). + Make sure you push all sub-modules as well. + + ``` + git push upstream + git push upstream + ... + ``` + +## Release + +Finally create a Release for the new `` on GitHub. +The release body should include all the release notes from the Changelog for this release. + +## Verify Examples + +After releasing verify that examples build outside of the repository. + +``` +./verify_examples.sh +``` + +The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. +This ensures they build with the published release, not the local copy. + +## Post-Release + +### Contrib Repository + +Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. + +### Website Documentation + +Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go]. +Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. 
+ +[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions +[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ +[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go + +### Demo Repository + +Bump the dependencies in the following Go services: + +- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md new file mode 100644 index 000000000..412f1e362 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -0,0 +1,224 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. 
+ * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there + is both a `/v2` and a `@v2.0.1` in that example. One way to think about + it is that the module name now includes the `/v2`, so include `/v2` + whenever you are using the module name). + * If a module is version `v0` or `v1`, do not include the major version + in either the module path or the import path. + * In addition to public APIs, telemetry produced by stable instrumentation + will remain stable and backwards compatible. This is to avoid breaking + alerts and dashboard. + * Modules will be used to encapsulate instrumentation, detectors, exporters, + propagators, and any other independent sets of related components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API and telemetry will + be versioned with a major version greater than `v0`. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * Stable contrib modules cannot depend on experimental modules from this + project. + * All stable contrib modules of the same major version with this project + will use the same entire version as this project. + * Stable modules may be released with an incremented minor or patch + version even though that module's code has not been changed. Instead + the only change that will have been included is to have updated that + modules dependency on this project's stable APIs. 
+ * When an experimental module in contrib becomes stable a new stable + module version will be released and will include this now stable + module. The new stable module version will be an increment of the minor + version number and will be applied to all existing stable contrib + modules, this project's modules, and the newly stable module being + released. + * Contrib modules will be kept up to date with this project's releases. + * Due to the dependency contrib modules will implicitly have on this + project's modules the release of stable contrib modules to match the + released version number will be staggered after this project's release. + There is no explicit time guarantee for how long after this projects + release the contrib release will be. Effort should be made to keep them + as close in time as possible. + * No additional stable release in this project can be made until the + contrib repository has a matching stable release. + * No release can be made in the contrib repository after this project's + stable release except for a stable release of the contrib repository. +* GitHub releases will be made for all releases. +* Go modules will be made available at Go package mirrors. + +## Example Versioning Lifecycle + +To better understand the implementation of the above policy the following +example is provided. This project is simplified to include only the following +modules and their versions: + +* `otel`: `v0.14.0` +* `otel/trace`: `v0.14.0` +* `otel/metric`: `v0.14.0` +* `otel/baggage`: `v0.14.0` +* `otel/sdk/trace`: `v0.14.0` +* `otel/sdk/metric`: `v0.14.0` + +These modules have been developed to a point where the `otel/trace`, +`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they +should be considered for a stable release. The `otel/metric` and +`otel/sdk/metric` are still under active development and the `otel` module +depends on both `otel/trace` and `otel/metric`. + +The `otel` package is refactored to remove its dependencies on `otel/metric` so +it can be released as stable as well. With that done the following release +candidates are made: + +* `otel`: `v1.0.0-RC1` +* `otel/trace`: `v1.0.0-RC1` +* `otel/baggage`: `v1.0.0-RC1` +* `otel/sdk/trace`: `v1.0.0-RC1` + +The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. + +A few minor issues are discovered in the `otel/trace` package. These issues are +resolved with some minor, but backwards incompatible, changes and are released +as a second release candidate: + +* `otel`: `v1.0.0-RC2` +* `otel/trace`: `v1.0.0-RC2` +* `otel/baggage`: `v1.0.0-RC2` +* `otel/sdk/trace`: `v1.0.0-RC2` + +Notice that all module version numbers are incremented to adhere to our +versioning policy. + +After these release candidates have been evaluated to satisfaction, they are +released as version `v1.0.0`. + +* `otel`: `v1.0.0` +* `otel/trace`: `v1.0.0` +* `otel/baggage`: `v1.0.0` +* `otel/sdk/trace`: `v1.0.0` + +Since both the `go` utility and the Go module system support [the semantic +versioning definition of +precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release +will correctly be interpreted as the successor to the previous release +candidates. + +Active development of this project continues. The `otel/metric` module now has +backwards incompatible changes to its API that need to be released and the +`otel/baggage` module has a minor bug fix that needs to be released. 
The +following release is made: + +* `otel`: `v1.0.1` +* `otel/trace`: `v1.0.1` +* `otel/metric`: `v0.15.0` +* `otel/baggage`: `v1.0.1` +* `otel/sdk/trace`: `v1.0.1` +* `otel/sdk/metric`: `v0.15.0` + +Notice that, again, all stable module versions are incremented in unison and +the `otel/sdk/metric` package, which depends on the `otel/metric` package, also +bumped its version. This bump of the `otel/sdk/metric` package makes sense +given their coupling, though it is not explicitly required by our versioning +policy. + +As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a +point where they should be evaluated for stability. The `otel` module is +reintegrated with the `otel/metric` package and the following release is made: + +* `otel`: `v1.1.0-RC1` +* `otel/trace`: `v1.1.0-RC1` +* `otel/metric`: `v1.1.0-RC1` +* `otel/baggage`: `v1.1.0-RC1` +* `otel/sdk/trace`: `v1.1.0-RC1` +* `otel/sdk/metric`: `v1.1.0-RC1` + +All the modules are evaluated and determined to a viable stable release. They +are then released as version `v1.1.0` (the minor version is incremented to +indicate the addition of new signal). + +* `otel`: `v1.1.0` +* `otel/trace`: `v1.1.0` +* `otel/metric`: `v1.1.0` +* `otel/baggage`: `v1.1.0` +* `otel/sdk/trace`: `v1.1.0` +* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 000000000..5b3da8f14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go new file mode 100644 index 000000000..eef51ebc2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -0,0 +1,5 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package attribute provides key and value attributes. +package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go new file mode 100644 index 000000000..318e42fca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "bytes" + "sync" + "sync/atomic" +) + +type ( + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. + Encoder interface { + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. + Encode(iterator Iterator) string + + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. + ID() EncoderID + } + + // EncoderID is used to identify distinct Encoder + // implementations, for caching encoded results. + EncoderID struct { + value uint64 + } + + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. 
This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. + pool sync.Pool // *bytes.Buffer + } +) + +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. +const escapeChar = '\\' + +var ( + _ Encoder = &defaultAttrEncoder{} + + // encoderIDCounter is for generating IDs for other attribute encoders. + encoderIDCounter uint64 + + defaultEncoderOnce sync.Once + defaultEncoderID = NewEncoderID() + defaultEncoderInstance *defaultAttrEncoder +) + +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. +func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. 
+func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 000000000..be9cd922d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..f2ba89ce4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. 
Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..d9a22c650 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). 
+func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. +func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 000000000..3028f9a40 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. 
+func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 000000000..bff9c7fdb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,431 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "cmp" + "encoding/json" + "reflect" + "slices" + "sort" +) + +type ( + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. + // + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Prior versions may find two Sets to be equal or not when compared + // directly (i.e. ==), but subsequent versions may not. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. + Set struct { + equivalent Distinct + } + + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to be ensures equivalence stability: comparisons + // will return the save value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. + Distinct struct { + iface interface{} + } + + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). + Sortable []KeyValue +) + +var ( + // keyValueType is used in computeDistinctReflect. + keyValueType = reflect.TypeOf(KeyValue{}) + + // emptySet is returned for empty attribute sets. + emptySet = &Set{ + equivalent: Distinct{ + iface: [0]KeyValue{}, + }, + } +) + +// EmptySet returns a reference to a Set with no elements. +// +// This is a convenience provided for optimized calling utility. +func EmptySet() *Set { + return emptySet +} + +// reflectValue abbreviates reflect.ValueOf(d). 
+func (d Distinct) reflectValue() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid Set. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of attributes in this set. +func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflectValue().Len() +} + +// Get returns the KeyValue at ordered position idx in this set. +func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithFiltered returns a new Set. 
See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. 
+ // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. 
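Reviewer aside, not part of the vendored file: the attribute.Set code above de-duplicates keys with last-value-wins semantics and exposes Distinct as the recommended map key. A minimal usage sketch of that API as vendored here (the key names and values are invented for illustration):

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Duplicate keys are resolved with last-value-wins semantics.
	set := attribute.NewSet(
		attribute.String("service", "autograph"),
		attribute.Int64("retries", 1),
		attribute.Int64("retries", 3), // this one wins
	)

	v, ok := set.Value(attribute.Key("retries"))
	fmt.Println(v.AsInt64(), ok) // 3 true

	// Distinct, not Set, is the recommended map key type.
	seen := map[attribute.Distinct]bool{}
	seen[set.Equivalent()] = true
	fmt.Println(len(seen)) // 1
}
```

Per the comments above, Equivalent() rather than the Set itself should be used for map keys, since Distinct is what carries the stability guarantee.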
+ +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..9ea0ecbbd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. 
+func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
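Another reviewer note, not part of the vendored file: the slice constructors above copy their input into fixed-size arrays (via the internal/attribute helpers later in this patch), which is what keeps Value comparable. A small sketch, assuming nothing beyond the attribute package shown in this patch:

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Slice-typed values are copied into fixed-size arrays internally,
	// so two Values built from equal slices compare equal with ==.
	a := attribute.StringSliceValue([]string{"x", "y"})
	b := attribute.StringSliceValue([]string{"x", "y"})
	fmt.Println(a == b)            // true
	fmt.Println(a.AsStringSlice()) // [x y]
	fmt.Println(a.Emit())          // ["x","y"]
}
```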
+func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 000000000..7d798435e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 000000000..36f536703 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,1018 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "strings" + "unicode/utf8" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +} + +// NewKeyProperty returns a new Property for key. +// +// The passed key must be valid, non-empty UTF-8 string. +// If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. 
+func NewKeyProperty(key string) (Property, error) { + if !validateBaggageName(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewKeyValuePropertyRaw] instead +// that does not require percent-encoding of the value. +func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewKeyValuePropertyRaw(key, decodedValue) +} + +// NewKeyValuePropertyRaw returns a new Property for key with value. +// +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. +func NewKeyValuePropertyRaw(key, value string) (Property, error) { + if !validateBaggageName(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + p, ok := parsePropertyInternal(property) + if !ok { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. +func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !validateBaggageName(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + return nil +} + +// Key returns the Property key. +func (p Property) Key() string { + return p.key +} + +// Value returns the Property value. Additionally, a boolean value is returned +// indicating if the returned value is the empty if the Property has a value +// that is empty or if the value is not set. 
+func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a header string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. +// +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewMemberRaw] instead +// that does not require percent-encoding of the value. +func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewMemberRaw(key, decodedValue, props...) +} + +// NewMemberRaw returns a new Member from the passed arguments. +// +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. 
+// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. +func NewMemberRaw(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var props properties + keyValue, properties, found := strings.Cut(member, propertyDelimiter) + if found { + // Parse the member properties. + for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + } + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + k, v, found := strings.Cut(keyValue, keyValueDelimiter) + if !found { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key := strings.TrimSpace(k) + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) + } + + // Decode a percent-encoded value. + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) + } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + +// validate ensures m conforms to the W3C Baggage specification. +// A key must be an ASCII string, returning an error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !validateBaggageName(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. 
+func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (m Member) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) + if len(m.properties) > 0 { + s += propertyDelimiter + m.properties.String() + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. 
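For context on the Baggage API vendored in this file (New, Parse, and Member above, SetMember just below), here is a minimal usage sketch. It is editorial illustration only, not patch content; the member keys and values are invented for the example:

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Parse a W3C baggage-string; duplicate keys keep the last value.
	b, err := baggage.Parse("userId=alice,serverNode=DF%2028,userId=bob")
	if err != nil {
		panic(err)
	}
	fmt.Println(b.Member("userId").Value())     // bob
	fmt.Println(b.Member("serverNode").Value()) // DF 28

	// Baggage is immutable; SetMember returns a new Baggage.
	m, _ := baggage.NewMemberRaw("isProduction", "false")
	b2, _ := b.SetMember(m)
	fmt.Println(b2.String())
}
```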
+func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members is not significant. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. +func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + s := Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } + } + return strings.Join(members, listDelimiter) +} + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " = value ". 
+ keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return + } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. 
+func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] +} + +// validateValue checks if the string is a valid W3C Baggage value. +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + +func validateValueChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. 
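A short illustration of the encoding behaviour implemented by parseMember and valueEscape above: values are percent-encoded by String and decoded again by Parse, so delimiters survive a round trip. This sketch is not part of the vendored code; the key "note" and its value are arbitrary:

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Spaces, commas, semicolons and '%' are escaped on output and
	// restored on parse.
	m, _ := baggage.NewMemberRaw("note", "hello, world; 100%")
	b, _ := baggage.New(m)
	fmt.Println(b.String()) // note=hello%2C%20world%3B%20100%25

	parsed, _ := baggage.Parse(b.String())
	fmt.Println(parsed.Member("note").Value()) // hello, world; 100%
}
```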
+func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 000000000..a572461a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 000000000..b51d87cab --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 000000000..24c52b387 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go new file mode 100644 index 000000000..2acbac354 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package codes // import "go.opentelemetry.io/otel/codes" + +import ( + "encoding/json" + "fmt" + "strconv" +) + +const ( + // Unset is the default status code. + Unset Code = 0 + + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Error Code = 1 + + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +) + +// Code is an 32-bit representation of a status state. 
+type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..ee8db448b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..921f85961 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about logs, see go.opentelemetry.io/otel/log. 
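Stepping back to the codes package vendored a little further up: Code marshals to and from JSON by name, and numeric input is range-checked against maxCode. A minimal sketch (editorial, not patch content):

```
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	c := codes.Error
	out, _ := json.Marshal(&c)
	fmt.Println(string(out)) // "Error"

	var back codes.Code
	_ = json.Unmarshal([]byte(`"Ok"`), &back)
	fmt.Println(back, back == codes.Ok) // Ok true

	// Numeric codes are accepted too, but must be below maxCode (3).
	err := json.Unmarshal([]byte("7"), &back)
	fmt.Println(err != nil) // true
}
```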
+ +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..67414c71e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 000000000..93e80ea30 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 000000000..07623b679 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. 
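As a reviewer aside on the ErrorHandler API above (not part of the vendored file): ErrorHandlerFunc lets a plain function act as the global handler. A minimal sketch, with an invented error value and logger prefix:

```
package main

import (
	"errors"
	"log"
	"os"

	"go.opentelemetry.io/otel"
)

func main() {
	logger := log.New(os.Stderr, "otel: ", log.LstdFlags)

	// Route every irremediable OpenTelemetry error through our own logger.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		logger.Printf("telemetry error: %v", err)
	}))

	// Handle is shorthand for GetErrorHandler().Handle(err).
	otel.Handle(errors.New("example error"))
}
```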
+// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 000000000..822d84794 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
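The internal/attribute helpers above, and the As*Slice functions that follow, all rely on the same reflect trick: copying a slice into a dynamically sized array so the result is a comparable value. A standalone sketch of that technique for reviewers; toArray is a hypothetical name, not part of the package:

```
package main

import (
	"fmt"
	"reflect"
)

// toArray mirrors the internal/attribute approach: copy a slice into a
// dynamically sized array so the returned value is comparable.
func toArray(v []int64) interface{} {
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0)))).Elem()
	reflect.Copy(cp, reflect.ValueOf(v))
	return cp.Interface()
}

func main() {
	a := toArray([]int64{1, 2, 3})
	b := toArray([]int64{1, 2, 3})
	fmt.Println(a == b) // true: [3]int64 arrays compare element-wise
}
```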
+func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 000000000..b4f85f44a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..3aea9c491 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. 
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..4259f0320 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 000000000..c657ff8e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go 
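The internal baggage package above exists only so the OpenTracing bridge can hook context updates; application code goes through the public `go.opentelemetry.io/otel/baggage` package that it backs. A minimal sketch of that public surface, to the best of my reading of its API (the "tenant"/"acme" member is illustrative):

```
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Build a baggage member and attach it to the context.
	m, err := baggage.NewMember("tenant", "acme")
	if err != nil {
		panic(err)
	}
	bag, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	// Downstream code reads it back out of the context.
	fmt.Println(baggage.FromContext(ctx).Member("tenant").Value()) // acme
}
```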
@@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 000000000..3a0cc42f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
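The ErrDelegator above is what backs the package-level otel.SetErrorHandler and otel.Handle helpers documented at the start of this section; until a handler is set, errors fall through to log.Print. A minimal sketch of installing a custom handler (the `countingHandler` type is illustrative and not safe for concurrent use):

```
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

// countingHandler is an illustrative ErrorHandler that counts and logs errors.
type countingHandler struct {
	n int
}

func (h *countingHandler) Handle(err error) {
	h.n++
	log.Printf("otel error #%d: %v", h.n, err)
}

func main() {
	h := &countingHandler{}
	// All ErrorHandlers previously returned from otel.GetErrorHandler now
	// forward to h through the delegator shown above.
	otel.SetErrorHandler(h)

	// Errors reported by instrumentation (or manually) reach h.
	otel.Handle(errors.New("exporter unreachable"))
}
```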
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) + } +} + +type siHistogram struct { + embedded.Int64Histogram + + name string + opts []metric.Int64HistogramOption + + delegate atomic.Value // metric.Int64Histogram +} + +var _ metric.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
+ } +} + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 000000000..adbca7d34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) + + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() + +// SetLogger sets the global Logger to l. +// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +// GetLogger returns the global logger. +func GetLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + GetLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + GetLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + GetLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + GetLogger().V(1).Info(msg, keysAndValues...) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..f2fc3929b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,506 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "reflect" + "sync" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. 
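The global logger above only emits Error-level messages by default; the V-level mapping (1 = Warn, 4 = Info, 8 = Debug) means a stdr-backed logger needs its verbosity raised before those messages appear. A minimal sketch using the public otel.SetLogger entry point and stdr's package-level verbosity setting (the "otel: " prefix is illustrative):

```
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// stdr verbosity gates logr V-levels globally; 8 surfaces otel Debug,
	// Info (4), and Warn (1) messages in addition to errors.
	stdr.SetVerbosity(8)

	logger := stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags|log.Lshortfile))
	otel.SetLogger(logger)
}
```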
+// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments map[instID]delegatedInstrument + + registry list.List + + delegate metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + m.mtx.Lock() + defer m.mtx.Unlock() + + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { + r := e.Value.(*registration) + r.setDelegate(meter) + n = e.Next() + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + i := &siCounter{name: name, opts: options} + cfg := metric.NewInt64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + i := &siUpDownCounter{name: name, opts: options} + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + i := &siHistogram{name: name, opts: options} + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + i := &siGauge{name: name, opts: options} + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + i := &aiCounter{name: name, opts: options} + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + i := &aiUpDownCounter{name: name, opts: options} + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + i := &aiGauge{name: name, opts: options} + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) 
+ } + + i := &sfCounter{name: name, opts: options} + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + i := &sfUpDownCounter{name: name, opts: options} + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + i := &sfHistogram{name: name, opts: options} + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + i := &sfGauge{name: name, opts: options} + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + i := &afCounter{name: name, opts: options} + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + i := &afUpDownCounter{name: name, opts: options} + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + i := &afGauge{name: name, opts: options} + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf(i), + description: cfg.Description(), + unit: cfg.Unit(), + } + m.instruments[id] = i + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. 
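From an application's point of view, the delegation above means instruments created from the global MeterProvider before an SDK is installed keep working and are transparently recreated on the real provider later. A minimal sketch, assuming the separate SDK module `go.opentelemetry.io/otel/sdk/metric` is available:

```
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Created before any SDK is configured: this is the placeholder meter above.
	counter, err := otel.Meter("example/app").Int64Counter("requests.total")
	if err != nil {
		otel.Handle(err)
		return
	}
	counter.Add(ctx, 1) // no-op until an SDK is installed; this measurement is dropped

	// Once a real MeterProvider is registered, the placeholder instrument is
	// recreated on it and subsequent Add calls are recorded by the SDK.
	otel.SetMeterProvider(sdkmetric.NewMeterProvider())
	counter.Add(ctx, 1)
}
```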
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + insts = unwrapInstruments(insts) + return m.delegate.RegisterCallback(f, insts...) + } + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() metric.Observable +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + GetErrorHandler().Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. + return nil + } + + var err error + err, c.unreg = c.unreg(), nil + return err +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go new file mode 100644 index 000000000..38560ff99 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/propagation" +) + +// textMapPropagator is a default TextMapPropagator that delegates calls to a +// registered delegate if one is set, otherwise it defaults to delegating the +// calls to a the default no-op propagation.TextMapPropagator. +type textMapPropagator struct { + mtx sync.Mutex + once sync.Once + delegate propagation.TextMapPropagator + noop propagation.TextMapPropagator +} + +// Compile-time guarantee that textMapPropagator implements the +// propagation.TextMapPropagator interface. +var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) + +func newTextMapPropagator() *textMapPropagator { + return &textMapPropagator{ + noop: propagation.NewCompositeTextMapPropagator(), + } +} + +// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are +// forwarded to. Delegation can only be performed once, all subsequent calls +// perform no delegation. +func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { + if delegate == nil { + return + } + + p.mtx.Lock() + p.once.Do(func() { p.delegate = delegate }) + p.mtx.Unlock() +} + +// effectiveDelegate returns the current delegate of p if one is set, +// otherwise the default noop TextMapPropagator is returned. This method +// can be called concurrently. 
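Callbacks registered on the placeholder meter are held in the registry list above and re-registered against the delegate when an SDK arrives. A minimal sketch of the user-facing pattern this supports (the instrument name is illustrative):

```
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/runtime")

	goroutines, err := meter.Int64ObservableGauge("process.goroutines")
	if err != nil {
		otel.Handle(err)
		return
	}

	// The callback observes the gauge once per collection cycle. If this runs
	// before an SDK is installed, the registration is replayed on the real
	// meter later, exactly as setDelegate above does.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
		return nil
	}, goroutines)
	if err != nil {
		otel.Handle(err)
		return
	}
	defer func() { _ = reg.Unregister() }()
}
```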
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.delegate != nil { + return p.delegate + } + return p.noop +} + +// Inject set cross-cutting concerns from the Context into the carrier. +func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. +func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. +func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 000000000..204ea142a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + errorHandlerHolder struct { + eh ErrorHandler + } + + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } + + meterProviderHolder struct { + mp metric.MeterProvider + } +) + +var ( + globalErrorHandler = defaultErrorHandler() + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + globalMeterProvider = defaultMeterProvider() + + delegateErrorHandlerOnce sync.Once + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once + delegateMeterOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + +// TracerProvider is the internal implementation for global.TracerProvider. 
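The delegating textMapPropagator above is what otel.GetTextMapPropagator returns before (and after) a concrete propagator is registered. A minimal sketch of wiring the standard W3C propagators and injecting them into an outgoing HTTP request (the URL is a placeholder):

```
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the W3C trace-context and baggage propagators as the global
	// TextMapPropagator; earlier handles delegate to this composite.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com", nil)

	// Inject writes traceparent/baggage headers from the context; a server
	// would call Extract with the same kind of carrier to continue the trace.
	otel.GetTextMapPropagator().Inject(req.Context(), propagation.HeaderCarrier(req.Header))
}
```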
+func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. +func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to its current value. No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to its current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. + globalPropagators.Store(propagatorsHolder{tm: p}) +} + +// MeterProvider is the internal implementation for global.MeterProvider. +func MeterProvider() metric.MeterProvider { + return globalMeterProvider.Load().(meterProviderHolder).mp +} + +// SetMeterProvider is the internal implementation for global.SetMeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to its current value. 
No delegate will be configured", + ) + return + } + } + + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { + def.setDelegate(mp) + } + }) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} + +func defaultMeterProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(meterProviderHolder{mp: &meterProvider{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 000000000..e31f442b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. + +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. +*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + embedded.TracerProvider + + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) 
+ } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct{ name, version, schema string } + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + embedded.Tracer + + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + embedded.Span + + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + +// SetName does nothing. 
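The same pattern applies to tracing: spans started from the global TracerProvider before an SDK is installed are the nonRecordingSpan above and stay no-ops, while the tracers themselves switch to the SDK once it is registered. A minimal sketch, assuming the separate SDK module `go.opentelemetry.io/otel/sdk/trace`:

```
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()
	tracer := otel.Tracer("example/app") // placeholder tracer for now

	// Started before the SDK exists: remains a no-op span forever.
	_, early := tracer.Start(ctx, "startup")
	early.End()

	// After registration the same tracer handle delegates to the SDK.
	otel.SetTracerProvider(sdktrace.NewTracerProvider())
	_, span := tracer.Start(ctx, "handle-request")
	defer span.End()
}
```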
+func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 000000000..9b1da2c02 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 000000000..6de7f2e4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. +func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go new file mode 100644 index 000000000..1e6473b32 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" +) + +// Meter returns a Meter from the global MeterProvider. The name must be the +// name of the library providing instrumentation. This name may be the same as +// the instrumented code only if that code provides built-in instrumentation. +// If the name is empty, then a implementation defined default name will be +// used instead. +// +// If this is called before a global MeterProvider is registered the returned +// Meter will be a No-op implementation of a Meter. When a global MeterProvider +// is registered for the first time, the returned Meter, and all the +// instruments it has created or will create, are recreated automatically from +// the new MeterProvider. +// +// This is short for GetMeterProvider().Meter(name). +func Meter(name string, opts ...metric.MeterOption) metric.Meter { + return GetMeterProvider().Meter(name, opts...) +} + +// GetMeterProvider returns the registered global meter provider. 
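The raw helpers above pack measurement values into a single uint64 word: float64 goes through its IEEE-754 bit pattern rather than a numeric conversion, and int64 is reinterpreted as two's complement. A standalone sketch of the round trip (mirroring, not importing, the internal package):

```
package main

import (
	"fmt"
	"math"
)

func main() {
	// Float64 values are stored as their bit pattern, not truncated.
	raw := math.Float64bits(3.5)
	fmt.Println(math.Float64frombits(raw)) // 3.5

	// Int64 values round-trip through a two's-complement reinterpretation,
	// as Int64ToRaw/RawToInt64 do.
	v := int64(-42)
	r := uint64(v)
	fmt.Println(int64(r)) // -42
}
```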
+// +// If no global GetMeterProvider has been registered, a No-op GetMeterProvider +// implementation is returned. When a global GetMeterProvider is registered for +// the first time, the returned GetMeterProvider, and all the Meters it has +// created or will create, are recreated automatically from the new +// GetMeterProvider. +func GetMeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers mp as the global MeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 000000000..0cf902e01 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go new file mode 100644 index 000000000..f8435d8f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Float64Observable interface { + Observable + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for +// unimplemented methods. +type Float64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableCounter + + Float64Observable +} + +// Float64ObservableCounterConfig contains options for asynchronous counter +// instruments that record float64 values. 
+type Float64ObservableCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableCounterConfig returns a new +// [Float64ObservableCounterConfig] with all opts applied. +func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { + var config Float64ObservableCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableCounterOption applies options to a +// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableCounterOption. +type Float64ObservableCounterOption interface { + applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig +} + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableUpDownCounter + + Float64Observable +} + +// Float64ObservableUpDownCounterConfig contains options for asynchronous +// counter instruments that record float64 values. +type Float64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableUpDownCounterConfig returns a new +// [Float64ObservableUpDownCounterConfig] with all opts applied. +func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { + var config Float64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableUpDownCounterOption applies options to a +// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableUpDownCounterOption. 
+type Float64ObservableUpDownCounterOption interface { + applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig +} + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableGauge + + Float64Observable +} + +// Float64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record float64 values. +type Float64ObservableGaugeConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] +// with all opts applied. +func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { + var config Float64ObservableGaugeConfig + for _, o := range opts { + config = o.applyFloat64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableGaugeOption applies options to a +// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableGaugeOption. +type Float64ObservableGaugeOption interface { + applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig +} + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Observer + + // Observe records the float64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value float64, options ...ObserveOption) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. 
Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObservableOption applies options to float64 Observer instruments. +type Float64ObservableOption interface { + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type float64CallbackOpt struct { + cback Float64Callback +} + +func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithFloat64Callback adds callback to be called for an instrument. +func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { + return float64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go new file mode 100644 index 000000000..e079aaef1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -0,0 +1,258 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Int64Observable interface { + Observable + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableCounter + + Int64Observable +} + +// Int64ObservableCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] +// with all opts applied. 
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { + var config Int64ObservableCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableCounterOption applies options to a +// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableCounterOption. +type Int64ObservableCounterOption interface { + applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig +} + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableUpDownCounter + + Int64Observable +} + +// Int64ObservableUpDownCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableUpDownCounterConfig returns a new +// [Int64ObservableUpDownCounterConfig] with all opts applied. +func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { + var config Int64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableUpDownCounterOption applies options to a +// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableUpDownCounterOption. +type Int64ObservableUpDownCounterOption interface { + applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig +} + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. 
+// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableGauge + + Int64Observable +} + +// Int64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableGaugeConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] +// with all opts applied. +func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { + var config Int64ObservableGaugeConfig + for _, o := range opts { + config = o.applyInt64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableGaugeOption applies options to a +// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableGaugeOption. +type Int64ObservableGaugeOption interface { + applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig +} + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Observer + + // Observe records the int64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value int64, options ...ObserveOption) +} + +// Int64Callback is a function registered with a Meter that makes observations +// for an Int64Observable instrument it is registered with. Calls to the +// Int64Observer record measurement values for the Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObservableOption applies options to int64 Observer instruments. 
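+//
+// For example, [WithInt64Callback] returns an Int64ObservableOption that
+// registers a callback at instrument creation time (an illustrative sketch;
+// meter is an assumed, already-configured Meter, countOpenFiles is a
+// hypothetical helper, and error handling is elided):
+//
+//	_, err := meter.Int64ObservableGauge(
+//		"process.open_files",
+//		WithInt64Callback(func(_ context.Context, o Int64Observer) error {
+//			o.Observe(countOpenFiles())
+//			return nil
+//		}),
+//	)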
+type Int64ObservableOption interface { + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption +} + +type int64CallbackOpt struct { + cback Int64Callback +} + +func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 000000000..d9e3b13e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
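+//
+// For example (an illustrative sketch; provider, the name, the version, and
+// the schema URL shown are placeholders, not values defined by this package):
+//
+//	meter := provider.Meter(
+//		"example.com/mypkg",
+//		WithInstrumentationVersion("v0.1.0"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.26.0"),
+//	)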
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..f153745b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. 
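+
+As a brief illustration (a sketch only; the instrument name, attribute, and
+ctx are placeholders, and it assumes the global MeterProvider has been
+configured elsewhere), a synchronous counter might be created and used like
+so:
+
+	import (
+		"go.opentelemetry.io/otel"
+		"go.opentelemetry.io/otel/attribute"
+		"go.opentelemetry.io/otel/metric"
+	)
+
+	meter := otel.Meter("example.com/mypkg")
+	requests, err := meter.Int64Counter("mypkg.requests")
+	if err != nil {
+		// Handle the instrument creation error.
+	}
+	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/")))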
+
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+  - Not be empty.
+  - Have an alphabetic character as their first letter.
+  - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+    ‘-’, or ‘/’.
+  - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will validate
+these names; it is the caller's responsibility to ensure compliance.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter, which ensures the callback is called once
+per collection cycle. A callback can be registered in two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000..1f6e0efa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..1a9dc6809
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,243 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..ea52e4023 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64GaugeOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64GaugeOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) 
Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. +func WithDescription(desc string) InstrumentOption { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. +// +// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. +func WithUnit(u string) InstrumentOption { return unitOpt(u) } + +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +// AddOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as an AddOption. +type AddOption interface { + applyAdd(AddConfig) AddConfig +} + +// AddConfig contains options for an addition measurement. +type AddConfig struct { + attrs attribute.Set +} + +// NewAddConfig returns a new [AddConfig] with all opts applied. 
+func NewAddConfig(opts []AddOption) AddConfig { + config := AddConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyAdd(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c AddConfig) Attributes() attribute.Set { + return c.attrs +} + +// RecordOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as a RecordOption. +type RecordOption interface { + applyRecord(RecordConfig) RecordConfig +} + +// RecordConfig contains options for a recorded measurement. +type RecordConfig struct { + attrs attribute.Set +} + +// NewRecordConfig returns a new [RecordConfig] with all opts applied. +func NewRecordConfig(opts []RecordOption) RecordConfig { + config := RecordConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyRecord(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c RecordConfig) Attributes() attribute.Set { + return c.attrs +} + +// ObserveOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as a ObserveOption. +type ObserveOption interface { + applyObserve(ObserveConfig) ObserveConfig +} + +// ObserveConfig contains options for an observed measurement. +type ObserveConfig struct { + attrs attribute.Set +} + +// NewObserveConfig returns a new [ObserveConfig] with all opts applied. +func NewObserveConfig(opts []ObserveOption) ObserveConfig { + config := ObserveConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyObserve(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c ObserveConfig) Attributes() attribute.Set { + return c.attrs +} + +// MeasurementOption applies options to all instrument measurement. +type MeasurementOption interface { + AddOption + RecordOption + ObserveOption +} + +type attrOpt struct { + set attribute.Set +} + +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +func (o attrOpt) applyAdd(c AddConfig) AddConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +// WithAttributeSet sets the attribute Set associated with a measurement is +// made with. +// +// If multiple WithAttributeSet or WithAttributes options are passed the +// attributes will be merged together in the order they are passed. Attributes +// with duplicate keys will use the last value passed. 
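+//
+// An illustrative sketch (counter, ctx, and the attribute values are
+// placeholders, not part of this API):
+//
+//	set := attribute.NewSet(attribute.String("host", "node-1"))
+//	counter.Add(ctx, 1, WithAttributeSet(set))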
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 000000000..14e08c24a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,278 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by
+	// an application, nor other applications. To achieve this, the import path
+	// of the instrumentation package is recommended to be used as name.
+	//
+	// If the name is empty, then an implementation defined default name will
+	// be used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+ Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + + // Int64UpDownCounter returns a new Int64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record int64 measurements during a computational + // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + + // Int64Histogram returns a new Int64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of int64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + + // Int64ObservableCounter returns a new Int64ObservableCounter identified + // by name and configured with options. The instrument is used to + // asynchronously record increasing int64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record int64 measurements once per + // a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + + // Int64ObservableGauge returns a new Int64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous int64 measurements once per a + // measurement collection cycle. 
+ // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + + // Float64Counter returns a new Float64Counter instrument identified by + // name and configured with options. The instrument is used to + // synchronously record increasing float64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + + // Float64UpDownCounter returns a new Float64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record float64 measurements during a computational + // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + + // Float64Histogram returns a new Float64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of float64 measurements during a + // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + + // Float64ObservableCounter returns a new Float64ObservableCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record increasing float64 + // measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
+ Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + + // Float64ObservableUpDownCounter returns a new + // Float64ObservableUpDownCounter instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // float64 measurements once per a measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + + // Float64ObservableGauge returns a new Float64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous float64 measurements once per a + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + // + // The function f needs to be concurrent safe. + RegisterCallback(f Callback, instruments ...Observable) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurement observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Callbacks. Meaning, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. 
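The comments above describe two ways to wire up asynchronous measurements: a per-instrument callback option, and Meter.RegisterCallback for one callback that observes several instruments. A minimal sketch of both follows, assuming the global MeterProvider from the go.opentelemetry.io/otel root package (a no-op until an SDK is installed) and the WithInt64Callback option, which lives elsewhere in this package rather than in this hunk; the instrument names are illustrative.

```
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.com/runtime")

	// Option 1: register the callback when the instrument is created.
	_, err := meter.Int64ObservableGauge("runtime.goroutines",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(int64(runtime.NumGoroutine()))
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}

	// Option 2: one callback registered for multiple instruments.
	heap, err := meter.Int64ObservableGauge("runtime.heap_alloc")
	if err != nil {
		panic(err)
	}
	gcs, err := meter.Int64ObservableCounter("runtime.gc_count")
	if err != nil {
		panic(err)
	}
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		// Only the instruments passed to RegisterCallback may be observed here.
		o.ObserveInt64(heap, int64(ms.HeapAlloc))
		o.ObserveInt64(gcs, int64(ms.NumGC))
		return nil
	}, heap, gcs)
	if err != nil {
		panic(err)
	}
	defer func() { _ = reg.Unregister() }() // idempotent per the contract above
}
```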
See the "API Implementations" + // section of the package documentation for more information. + embedded.Observer + + // ObserveFloat64 records the float64 value for obsrv. + ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + + // ObserveInt64 records the int64 value for obsrv. + ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) +} + +// Registration is an token representing the unique registration of a callback +// for a set of instruments with a Meter. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Registration interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Registration + + // Unregister removes the callback registration from a Meter. + // + // This method needs to be idempotent and concurrent safe. + Unregister() error +} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 000000000..bb8969435 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 000000000..ca6fcbdc0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,281 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. 
+ + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Observer = Observer{} + _ metric.Registration = Registration{} + _ metric.Int64Counter = Int64Counter{} + _ metric.Float64Counter = Float64Counter{} + _ metric.Int64UpDownCounter = Int64UpDownCounter{} + _ metric.Float64UpDownCounter = Float64UpDownCounter{} + _ metric.Int64Histogram = Int64Histogram{} + _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} + _ metric.Int64ObservableCounter = Int64ObservableCounter{} + _ metric.Float64ObservableCounter = Float64ObservableCounter{} + _ metric.Int64ObservableGauge = Int64ObservableGauge{} + _ metric.Float64ObservableGauge = Float64ObservableGauge{} + _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} + _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} + _ metric.Int64Observer = Int64Observer{} + _ metric.Float64Observer = Float64Observer{} +) + +// MeterProvider is an OpenTelemetry No-Op MeterProvider. +type MeterProvider struct{ embedded.MeterProvider } + +// NewMeterProvider returns a MeterProvider that does not record any telemetry. +func NewMeterProvider() MeterProvider { + return MeterProvider{} +} + +// Meter returns an OpenTelemetry Meter that does not record any telemetry. +func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return Meter{} +} + +// Meter is an OpenTelemetry No-Op Meter. +type Meter struct{ embedded.Meter } + +// Int64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return Int64Counter{}, nil +} + +// Int64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return Int64UpDownCounter{}, nil +} + +// Int64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + return Int64Histogram{}, nil +} + +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + +// Int64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + return Int64ObservableCounter{}, nil +} + +// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + return Int64ObservableUpDownCounter{}, nil +} + +// Int64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + return Int64ObservableGauge{}, nil +} + +// Float64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. 
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements
+// that produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+	return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback. It performs no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation and holds no record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
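The package comment above notes that the no-op types can both stand in for a full SDK and be embedded by partial implementations. A small sketch of both uses, relying only on the types in this file; the partialMeter type and the instrument names are hypothetical.

```
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// partialMeter embeds the no-op Meter so only the methods it cares about
// need to be implemented; everything else silently does nothing.
type partialMeter struct {
	noop.Meter
	created []string
}

// The compile-time assertion mirrors the var block at the top of this file.
var _ metric.Meter = (*partialMeter)(nil)

func (m *partialMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	m.created = append(m.created, name)
	return noop.Int64Counter{}, nil
}

func main() {
	// Handing out the no-op provider disables metrics without touching
	// instrumented code paths.
	var provider metric.MeterProvider = noop.NewMeterProvider()
	meter := provider.Meter("example.com/service")

	c, _ := meter.Int64Counter("requests.total")
	c.Add(context.Background(), 1) // compiles and runs, records nothing
}
```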
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go new file mode 100644 index 000000000..8403a4bad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64CounterConfig contains options for synchronous counter instruments that +// record float64 values. +type Float64CounterConfig struct { + description string + unit string +} + +// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts +// applied. +func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { + var config Float64CounterConfig + for _, o := range opts { + config = o.applyFloat64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64CounterConfig) Unit() string { + return c.unit +} + +// Float64CounterOption applies options to a [Float64CounterConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64CounterOption. +type Float64CounterOption interface { + applyFloat64Counter(Float64CounterConfig) Float64CounterConfig +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64UpDownCounterConfig contains options for synchronous counter +// instruments that record float64 values. +type Float64UpDownCounterConfig struct { + description string + unit string +} + +// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] +// with all opts applied. 
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { + var config Float64UpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Float64UpDownCounterOption applies options to a +// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that +// can be used as a Float64UpDownCounterOption. +type Float64UpDownCounterOption interface { + applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr float64, options ...RecordOption) +} + +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. +type Float64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all +// opts applied. +func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { + var config Float64HistogramConfig + for _, o := range opts { + config = o.applyFloat64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Float64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Float64HistogramOption applies options to a [Float64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64HistogramOption. +type Float64HistogramOption interface { + applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig +} + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. 
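To see what an SDK receives from these config constructors, the sketch below builds a Float64HistogramConfig and reads it back. WithDescription, WithUnit, and WithExplicitBucketBoundaries are instrument options defined elsewhere in this package (not in this hunk), so treat their exact names as an assumption; the boundary values are arbitrary.

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	cfg := metric.NewFloat64HistogramConfig(
		metric.WithDescription("duration of handled requests"),
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1),
	)

	fmt.Println(cfg.Description())              // duration of handled requests
	fmt.Println(cfg.Unit())                     // s
	fmt.Println(cfg.ExplicitBucketBoundaries()) // [0.005 0.01 ... 1]
}
```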
+ // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go new file mode 100644 index 000000000..783fdfba7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64CounterConfig contains options for synchronous counter instruments that +// record int64 values. +type Int64CounterConfig struct { + description string + unit string +} + +// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts +// applied. +func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { + var config Int64CounterConfig + for _, o := range opts { + config = o.applyInt64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64CounterConfig) Unit() string { + return c.unit +} + +// Int64CounterOption applies options to a [Int64CounterConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64CounterOption. +type Int64CounterOption interface { + applyInt64Counter(Int64CounterConfig) Int64CounterConfig +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. 
+// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64UpDownCounterConfig contains options for synchronous counter +// instruments that record int64 values. +type Int64UpDownCounterConfig struct { + description string + unit string +} + +// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with +// all opts applied. +func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { + var config Int64UpDownCounterConfig + for _, o := range opts { + config = o.applyInt64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. +// See [InstrumentOption] for other options that can be used as an +// Int64UpDownCounterOption. +type Int64UpDownCounterOption interface { + applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr int64, options ...RecordOption) +} + +// Int64HistogramConfig contains options for synchronous histogram instruments +// that record int64 values. +type Int64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts +// applied. +func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { + var config Int64HistogramConfig + for _, o := range opts { + config = o.applyInt64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Int64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
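Putting the synchronous instruments above together, here is a rough end-to-end sketch of counting a request and recording its latency. It goes through the global otel.Meter (a no-op unless an SDK MeterProvider has been registered), and the instrument and attribute names are invented for the example.

```
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example.com/api")

	requests, err := meter.Int64Counter("http.server.requests")
	if err != nil {
		panic(err)
	}
	latency, err := meter.Float64Histogram("http.server.duration")
	if err != nil {
		panic(err)
	}

	start := time.Now()
	// ... handle a request ...

	// A MeasurementOption works as both an AddOption and a RecordOption.
	attrs := metric.WithAttributes(attribute.String("http.route", "/users/{id}"))
	requests.Add(ctx, 1, attrs)
	latency.Record(ctx, time.Since(start).Seconds(), attrs)
}
```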
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Int64HistogramOption applies options to a [Int64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64HistogramOption. +type Int64HistogramOption interface { + applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig +} + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 000000000..2fd949733 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. 
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) { + global.SetTextMapPropagator(propagator) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 000000000..e2959ac74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go new file mode 100644 index 000000000..552263ba7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + + "go.opentelemetry.io/otel/baggage" +) + +const baggageHeader = "baggage" + +// Baggage is a propagator that supports the W3C Baggage format. +// +// This propagates user-defined baggage associated with a trace. The complete +// specification is defined at https://www.w3.org/TR/baggage/. +type Baggage struct{} + +var _ TextMapPropagator = Baggage{} + +// Inject sets baggage key-values from ctx into the carrier. +func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { + bStr := baggage.FromContext(ctx).String() + if bStr != "" { + carrier.Set(baggageHeader, bStr) + } +} + +// Extract returns a copy of parent with the baggage from the carrier added. +func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + bStr := carrier.Get(baggageHeader) + if bStr == "" { + return parent + } + + bag, err := baggage.Parse(bStr) + if err != nil { + return parent + } + return baggage.ContextWithBaggage(parent, bag) +} + +// Fields returns the keys who's values are set with Inject. +func (b Baggage) Fields() []string { + return []string{baggageHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go new file mode 100644 index 000000000..33a3baf15 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package propagation contains OpenTelemetry context propagators. + +OpenTelemetry propagators are used to extract and inject context data from and +into messages exchanged by applications. The propagator supported by this +package is the W3C Trace Context encoding +(https://www.w3.org/TR/trace-context/), and W3C Baggage +(https://www.w3.org/TR/baggage/). +*/ +package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go new file mode 100644 index 000000000..8c8286aab --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "net/http" +) + +// TextMapCarrier is the storage medium used by a TextMapPropagator. +type TextMapCarrier interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
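A short round trip through the Baggage propagator defined above, using the MapCarrier type that appears a little further down in this file; the member keys and values are made up.

```
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Attach W3C baggage to a context.
	bag, err := baggage.Parse("user.id=42,tenant=acme")
	if err != nil {
		panic(err)
	}
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	// Inject writes the "baggage" header into the carrier.
	carrier := propagation.MapCarrier{}
	propagation.Baggage{}.Inject(ctx, carrier)
	fmt.Println(carrier["baggage"]) // e.g. user.id=42,tenant=acme

	// Extract reverses the operation on the receiving side.
	ctx2 := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(ctx2).Member("tenant").Value()) // acme
}
```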
+ + // Get returns the value associated with the passed key. + Get(key string) string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Set stores the key-value pair. + Set(key string, value string) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Keys lists the keys stored in this carrier. + Keys() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage +// medium for propagated key-value pairs. +type MapCarrier map[string]string + +// Compile time check that MapCarrier implements the TextMapCarrier. +var _ TextMapCarrier = MapCarrier{} + +// Get returns the value associated with the passed key. +func (c MapCarrier) Get(key string) string { + return c[key] +} + +// Set stores the key-value pair. +func (c MapCarrier) Set(key, value string) { + c[key] = value +} + +// Keys lists the keys stored in this carrier. +func (c MapCarrier) Keys() []string { + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + return keys +} + +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +type HeaderCarrier http.Header + +// Get returns the value associated with the passed key. +func (hc HeaderCarrier) Get(key string) string { + return http.Header(hc).Get(key) +} + +// Set stores the key-value pair. +func (hc HeaderCarrier) Set(key string, value string) { + http.Header(hc).Set(key, value) +} + +// Keys lists the keys stored in this carrier. +func (hc HeaderCarrier) Keys() []string { + keys := make([]string, 0, len(hc)) + for k := range hc { + keys = append(keys, k) + } + return keys +} + +// TextMapPropagator propagates cross-cutting concerns as key-value text +// pairs within a carrier that travels in-band across process boundaries. +type TextMapPropagator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Inject set cross-cutting concerns from the Context into the carrier. + Inject(ctx context.Context, carrier TextMapCarrier) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Extract reads cross-cutting concerns from the carrier into a Context. + Extract(ctx context.Context, carrier TextMapCarrier) context.Context + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Fields returns the keys whose values are set with Inject. + Fields() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + +type compositeTextMapPropagator []TextMapPropagator + +func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { + for _, i := range p { + i.Inject(ctx, carrier) + } +} + +func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + for _, i := range p { + ctx = i.Extract(ctx, carrier) + } + return ctx +} + +func (p compositeTextMapPropagator) Fields() []string { + unique := make(map[string]struct{}) + for _, i := range p { + for _, k := range i.Fields() { + unique[k] = struct{}{} + } + } + + fields := make([]string, 0, len(unique)) + for k := range unique { + fields = append(fields, k) + } + return fields +} + +// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the +// group of passed TextMapPropagator. This allows different cross-cutting +// concerns to be propagates in a unified manner. +// +// The returned TextMapPropagator will inject and extract cross-cutting +// concerns in the order the TextMapPropagators were provided. Additionally, +// the Fields method will return a de-duplicated slice of the keys that are +// set with the Inject method. +func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { + return compositeTextMapPropagator(p) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go new file mode 100644 index 000000000..6870e316d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -0,0 +1,156 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "encoding/hex" + "fmt" + "strings" + + "go.opentelemetry.io/otel/trace" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" + delimiter = "-" +) + +// TraceContext is a propagator that supports the W3C Trace Context format +// (https://www.w3.org/TR/trace-context/) +// +// This propagator will propagate the traceparent and tracestate headers to +// guarantee traces are not broken. It is up to the users of this propagator +// to choose if they want to participate in a trace by modifying the +// traceparent header and relevant parts of the tracestate header containing +// their proprietary information. +type TraceContext struct{} + +var ( + _ TextMapPropagator = TraceContext{} + versionPart = fmt.Sprintf("%.2X", supportedVersion) +) + +// Inject injects the trace context from ctx into carrier. +func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { + sc := trace.SpanContextFromContext(ctx) + if !sc.IsValid() { + return + } + + if ts := sc.TraceState().String(); ts != "" { + carrier.Set(tracestateHeader, ts) + } + + // Clear all flags other than the trace-context supported sampling bit. + flags := sc.TraceFlags() & trace.FlagsSampled + + var sb strings.Builder + sb.Grow(2 + 32 + 16 + 2 + 3) + _, _ = sb.WriteString(versionPart) + traceID := sc.TraceID() + spanID := sc.SpanID() + flagByte := [1]byte{byte(flags)} + var buf [32]byte + for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} { + _ = sb.WriteByte(delimiter[0]) + n := hex.Encode(buf[:], src) + _, _ = sb.Write(buf[:n]) + } + carrier.Set(traceparentHeader, sb.String()) +} + +// Extract reads tracecontext from the carrier into a returned Context. 
+// +// The returned Context will be a copy of ctx and contain the extracted +// tracecontext as the remote SpanContext. If the extracted tracecontext is +// invalid, the passed ctx will be returned directly instead. +func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + sc := tc.extract(carrier) + if !sc.IsValid() { + return ctx + } + return trace.ContextWithRemoteSpanContext(ctx, sc) +} + +func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { + h := carrier.Get(traceparentHeader) + if h == "" { + return trace.SpanContext{} + } + + var ver [1]byte + if !extractPart(ver[:], &h, 2) { + return trace.SpanContext{} + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{} + } + + var scc trace.SpanContextConfig + if !extractPart(scc.TraceID[:], &h, 32) { + return trace.SpanContext{} + } + if !extractPart(scc.SpanID[:], &h, 16) { + return trace.SpanContext{} + } + + var opts [1]byte + if !extractPart(opts[:], &h, 2) { + return trace.SpanContext{} + } + if version == 0 && (h != "" || opts[0] > 2) { + // version 0 not allow extra + // version 0 not allow other flag + return trace.SpanContext{} + } + + // Clear all flags other than the trace-context supported sampling bit. + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + + // Ignore the error returned here. Failure to parse tracestate MUST NOT + // affect the parsing of traceparent according to the W3C tracecontext + // specification. + scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) + scc.Remote = true + + sc := trace.NewSpanContext(scc) + if !sc.IsValid() { + return trace.SpanContext{} + } + + return sc +} + +// upperHex detect hex is upper case Unicode characters. +func upperHex(v string) bool { + for _, c := range v { + if c >= 'A' && c <= 'F' { + return true + } + } + return false +} + +func extractPart(dst []byte, h *string, n int) bool { + part, left, _ := strings.Cut(*h, delimiter) + *h = left + // hex.Decode decodes unsupported upper-case characters, so exclude explicitly. + if len(part) != n || upperHex(part) { + return false + } + if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 { + return false + } + return true +} + +// Fields returns the keys who's values are set with Inject. 
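The sketch below exercises TraceContext end to end: it registers a composite propagator globally (via otel.SetTextMapPropagator and NewCompositeTextMapPropagator from earlier in this diff), injects a sampled span context into HTTP headers, and extracts it again. The trace and span IDs are the W3C specification's example values, hard-coded purely for illustration.

```
package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Register TraceContext and Baggage together as the global propagator.
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	)
	otel.SetTextMapPropagator(prop)

	// Build a sampled span context from hard-coded IDs.
	traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    traceID,
		SpanID:     spanID,
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	// Inject into outgoing headers, then extract on the "server" side.
	header := http.Header{}
	prop.Inject(ctx, propagation.HeaderCarrier(header))
	fmt.Println(header.Get("traceparent")) // 00-4bf92f35...-00f067aa...-01

	remote := trace.SpanContextFromContext(
		prop.Extract(context.Background(), propagation.HeaderCarrier(header)),
	)
	fmt.Println(remote.TraceID(), remote.IsRemote()) // same trace ID, true
}
```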
+func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 000000000..4d36b98cf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,28 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt new file mode 100644 index 000000000..ab09daf9d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -0,0 +1 @@ +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 000000000..87b842c5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 000000000..e087c9c04 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 000000000..c7b804bbe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two
+	// different counters starting from `1`, one for sent messages and one for
+	// received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different
+// counters starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 000000000..137acc67d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 000000000..d318221e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 000000000..7e365e82c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,1999 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). 
However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. 
It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability. This attribute represents the zone where the resource is
+	// running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. 
It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions
+// often have multiple, isolated locations known as zones to increase
+// availability. This attribute represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. + // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. 
+func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. 
This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. 
+func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. 
+func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 000000000..634a1dce0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 000000000..21497bb6b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. 
+func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. 
It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. 
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. 
+func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
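A hedged sketch of a datasource-triggered FaaS span built from the `faas.trigger` and `faas.document.*` helpers above; the bucket, key, and timestamp are placeholder values and the import path is assumed, not taken from this file.

```
// Hypothetical example of a datasource-triggered function span.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func handleObjectCreated(ctx context.Context, bucket, key string) {
	_, span := otel.Tracer("example").Start(ctx, "handleObjectCreated",
		trace.WithAttributes(
			semconv.FaaSTriggerDatasource,                    // faas.trigger=datasource
			semconv.FaaSDocumentOperationInsert,              // a new object was created
			semconv.FaaSDocumentCollection(bucket),           // bucket or database name
			semconv.FaaSDocumentName(key),                    // file or table name
			semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"), // ISO 8601, UTC
		))
	defer span.End()
	// ... process the event ...
}
```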
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It describes + // more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
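For the `net.*` attributes, a small illustrative sketch of annotating an outbound connection span; host names, addresses, and ports are placeholders, and the semconv import path is an assumption rather than something stated in this file.

```
// Hypothetical outbound-connection span using the net.* helpers above.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func dialBackend(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "dial backend.example.com",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.NetTransportTCP,   // net.transport=ip_tcp
			semconv.NetSockFamilyInet, // net.sock.family=inet
			semconv.NetPeerName("backend.example.com"),
			semconv.NetPeerPort(8443),
			semconv.NetSockPeerAddr("10.1.2.3"),
			semconv.NetSockPeerPort(8443),
			semconv.NetAppProtocolName("http"),
			semconv.NetAppProtocolVersion("1.1"),
		))
	defer span.End()
	// ... establish the connection ...
}
```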
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). 
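The `peer.service`, `enduser.*`, and `thread.*` helpers above are typically added to an already-started span; a brief sketch with placeholder values follows, with the import path again assumed.

```
// Hypothetical enrichment of an existing span; all values are placeholders.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func annotate(span trace.Span) {
	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // logical name of the remote service
		semconv.EnduserID("username"),         // subject from the access token
		semconv.EnduserRole("admin"),
		semconv.ThreadID(42),
		semconv.ThreadName("main"),
	)
}
```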
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the HTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.)
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the HTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
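A sketch of an HTTP client span that records the request attributes up front and the response details once they are known; the URL, status code, and size are placeholders and the semconv import path is an assumption.

```
// Hypothetical HTTP client span using the http.* helpers above.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func fetch(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.HTTPMethod("GET"),
			semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry"),
			semconv.HTTPFlavorHTTP11,
		))
	defer span.End()

	// ... perform the request, then record what came back ...
	span.SetAttributes(
		semconv.HTTPStatusCode(200),
		semconv.HTTPResponseContentLength(3495),
	)
}
```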
+func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). 
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
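On the server side, the `http.scheme`, `http.target`, `http.route`, and `http.client_ip` helpers above might be applied to a span extracted from the incoming request; the route template and header handling below are simplified placeholders, not a prescribed pattern.

```
// Hypothetical server-side span annotation; route and headers are simplified.
package example

import (
	"net/http"

	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func annotateServerSpan(span trace.Span, r *http.Request) {
	span.SetAttributes(
		semconv.HTTPMethod(r.Method),
		semconv.HTTPScheme("https"),
		semconv.HTTPTarget(r.RequestURI),
		semconv.HTTPRoute("/users/:userID"),                   // low-cardinality route template
		semconv.HTTPClientIP(r.Header.Get("X-Forwarded-For")), // original client, if known
	)
}
```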
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. 
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. 
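As a usage sketch for the DynamoDB attribute helpers defined above: an instrumentation could attach them to a client span roughly as follows. This is a minimal sketch, assuming a tracer named "example", illustrative span and table names, and a guessed semconv version in the import path (this hunk does not show the file's package path).

```
// Illustrative sketch only; the semconv import version is an assumption,
// not taken from this patch.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordQueryAttributes starts a client span for a DynamoDB Query call and
// records the request parameters using the helpers defined in this file.
func recordQueryAttributes(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
	)
}
```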
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
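A similar sketch for the messaging attributes above: a consumer-side instrumentation might start its span with these helpers. The tracer name, topic name, batch size, and the semconv version in the import path are illustrative assumptions.

```
// Illustrative sketch only; the semconv import version is an assumption.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordReceiveAttributes starts a consumer span for a batch receive and
// records the messaging attributes defined in this file.
func recordReceiveAttributes(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "MyTopic receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSystem("kafka"),
			semconv.MessagingSourceName("MyTopic"),
			semconv.MessagingOperationReceive,
			semconv.MessagingBatchMessageCount(2),
		),
	)
	defer span.End()
}
```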
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. 
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. 
It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. 
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
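And for the RPC attributes: a gRPC client instrumentation might record the call and its status using the helpers and enum values defined above. The tracer name, service name, and the semconv version in the import path are illustrative assumptions.

```
// Illustrative sketch only; the semconv import version is an assumption.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordGRPCClientAttributes starts a client span for a gRPC call and
// records the rpc.* attributes defined in this file.
func recordGRPCClientAttributes(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/Echo",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemGRPC,
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("Echo"),
		semconv.RPCGRPCStatusCodeOk,
	)
}
```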
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 000000000..82e1f46b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go new file mode 100644 index 000000000..6685c392b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -0,0 +1,1198 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes HTTP attributes. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. 
It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTP Server spans attributes +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) + // if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. 
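For the v1.20.0 package introduced by this diff, an HTTP server instrumentation might combine the helpers above like so. This is a minimal sketch; the tracer name, route, and attribute values are illustrative, while the import path is the package path shown in the file header above.

```
// Illustrative sketch only; values are placeholders.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordHTTPServerAttributes starts a server span and records the http.*
// attributes defined in this file.
func recordHTTPServerAttributes(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "GET /users/:userID",
		trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	span.SetAttributes(
		semconv.HTTPMethod("GET"),
		semconv.HTTPScheme("https"),
		semconv.HTTPRoute("/users/:userID"),
		semconv.HTTPStatusCode(200),
	)
}
```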
+ EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the application + // layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the version + // of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.protocol.version` refers to the version of the protocol used + // and might be different from the protocol client's version. If the HTTP + // client used has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. 
It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. 
It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. 
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. 
It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. 
It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. 
It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. 
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. 
If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. 
+func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. 
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go new file mode 100644 index 000000000..0d1f55a8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.20.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go new file mode 100644 index 000000000..637763932 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. 
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go new file mode 100644 index 000000000..f40c97825 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 000000000..9c1840631 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 000000000..3d44dae27 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2060 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). 
However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + CloudResourceIDKey = attribute.Key("cloud.resource_id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. 
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + 
CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. 
+func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. 
It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. 
It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. 
It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). 
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. 
It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go new file mode 100644 index 000000000..95d0210e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go new file mode 100644 index 000000000..90b1b0452 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -0,0 +1,2599 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. 
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. 
It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Recommended (Should be collected by default only if + // there is sanitization that excludes sensitive information.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBConnectionString returns an 
attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. 
+ // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. 
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
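For the FaaS conventions above, a hedged sketch of the attributes a datasource-triggered invocation might record, using the constructors and enum values defined in this file (values are taken from the Examples in the doc comments; the import version path is an assumption):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version path
)

func main() {
	// Attributes an S3/Cloud Storage "datasource"-triggered invocation might carry.
	attrs := []attribute.KeyValue{
		semconv.FaaSTriggerDatasource, // faas.trigger = datasource
		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
		semconv.FaaSDocumentCollection("myBucketName"), // bucket / database name
		semconv.FaaSDocumentOperationInsert,            // faas.document.operation = insert
		semconv.FaaSDocumentName("myFile.txt"),
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"), // ISO 8601, UTC
		semconv.FaaSColdstart(true),                      // first invocation of this instance
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```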
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
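A short, illustrative combination of the `peer.service`, `enduser.*`, and `thread.*` helpers defined above on a single span (import version path assumed; span name arbitrary):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version path
)

func main() {
	tracer := otel.Tracer("authz-example")
	_, span := tracer.Start(context.Background(), "CheckToken")
	defer span.End()

	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // logical service.name of the remote service
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
		semconv.ThreadID(42),      // managed thread ID, not the OS thread ID
		semconv.ThreadName("main"),
	)
}
```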
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/users/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. 
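To make the client/server split concrete, here is a hedged sketch that builds the HTTP client attributes (`http.url`, `http.resend_count`) and server attributes (`http.target`, `http.client_ip`) defined above, again with the semconv version path assumed:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version path
)

func main() {
	// Client-side HTTP span attributes.
	client := []attribute.KeyValue{
		semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"), // never include credentials in the URL
		semconv.HTTPResendCount(3),                                            // only set if the request was retried
	}
	// Server-side HTTP span attributes.
	server := []attribute.KeyValue{
		semconv.HTTPTarget("/users/12314/?q=ddds"),
		semconv.HTTPClientIP("83.164.160.102"), // e.g. taken from X-Forwarded-For
	}
	for _, kv := range append(client, server...) {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```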
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. 
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
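A sketch of how a DynamoDB `Query` span could use the request-parameter helpers defined above (illustrative values drawn from the doc-comment Examples; import version path assumed):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version path
)

func main() {
	tracer := otel.Tracer("dynamodb-example")
	_, span := tracer.Start(context.Background(), "DynamoDB.Query Users")
	defer span.End()

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBScanForward(false), // the ScanIndexForward request parameter
	)
}
```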
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. 
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
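An illustrative sketch combining the S3 multipart-upload helpers and the GraphQL helpers defined above into plain attribute lists (import version path assumed):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version path
)

func main() {
	// Attributes for an S3 upload-part call in a multipart upload.
	s3 := []attribute.KeyValue{
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456), // 1..10,000
	}
	// Attributes for a GraphQL query execution.
	gql := []attribute.KeyValue{
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	}
	for _, kv := range append(s3, gql...) {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```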
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 000000000..2de1fc3c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 000000000..d8dc822b2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the aSP.NET Core exception middleware handling result + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. 
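+//
+// Illustrative sketch (an editorial example, not part of the generated
+// conventions): the ASP.NET Core helpers above are typically combined into a
+// single attribute set that is then attached to the corresponding server
+// span. The policy name "fixed" is an arbitrary example value taken from the
+// documented examples.
+//
+//	attrs := []attribute.KeyValue{
+//		AspnetcoreRateLimitingResultAcquired,
+//		AspnetcoreRateLimitingPolicy("fixed"),
+//		AspnetcoreRequestIsUnhandled(false),
+//		AspnetcoreRoutingIsFallback(false),
+//	}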
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. 
It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. 
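+//
+// Illustrative sketch (an editorial example, not part of the generated
+// conventions): the DynamoDB helpers above can be combined to describe a
+// single Query request on a client span; the table name, index name, and
+// limit are arbitrary example values drawn from the documented examples.
+//
+//	attrs := []attribute.KeyValue{
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBIndexName("name_to_group"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//		AWSDynamoDBScanForward(true),
+//	}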
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
+ // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. 
+ // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
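As a hedged usage sketch (not part of the vendored file's API surface beyond the helpers shown above): the `aws.s3.*` helpers defined earlier could be attached to a span around an S3 call. The semconv import version, tracer name, and literal values below are assumptions for illustration.

```
// Illustrative sketch only: attaching the aws.s3.* attributes defined above
// to a span around an S3 request. The semconv version in the import path and
// all literal values are assumptions for this example.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func traceS3GetObject(ctx context.Context) {
	_, span := otel.Tracer("s3-client").Start(ctx, "S3.GetObject")
	defer span.End()

	// AWSS3Bucket and AWSS3Key are the helpers defined above; they just wrap
	// attribute.Key(...).String(...), so they compose with any other KeyValue.
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
	)
}
```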
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. 
It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
+func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
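A minimal sketch of how the `code.*` helpers above might record a code location on a span; the tracer name, import version, and values (taken from the Examples in the doc comments) are illustrative assumptions.

```
// Illustrative sketch: recording the code location of an operation using the
// code.* helpers above. Import version and values are assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func serveRequest(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "serveRequest")
	defer span.End()

	span.SetAttributes(
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
	)
}
```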
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +var ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. 
It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // DBClientConnectionsPoolNameKey is the attribute Key conforming to the + // "db.client.connections.pool.name" semantic conventions. It represents + // the name of the connection pool; unique within the instrumented + // application. In case the connection pool implementation doesn't provide + // a name, instrumentation should use a combination of `server.address` and + // `server.port` attributes formatted as `server.address:server.port`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myDataSource' + DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") + + // DBClientConnectionsStateKey is the attribute Key conforming to the + // "db.client.connections.state" semantic conventions. It represents the + // state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle' + DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match + // the value provided in the query and may be qualified with the schema and + // database name. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" + // semantic conventions. It represents the name of the database, fully + // qualified within the server address and port. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they + // SHOULD be concatenated (potentially using database system specific + // conventions) from most general to most specific namespace component, and + // more specific namespaces SHOULD NOT be captured without the more general + // namespaces, to ensure that "startswith" queries for the more general + // namespaces will be valid. 
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. 
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. 
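For orientation, a hedged sketch of how the `db.*` and `db.cassandra.*` attributes above might annotate a database client span. The tracer name, query, and semconv import version are assumptions; the enum values (`DBSystemCassandra`, `DBCassandraConsistencyLevelQuorum`) are the ones defined above.

```
// Illustrative sketch: annotating a Cassandra client span with the db.* and
// db.cassandra.* attributes defined above. Values are illustrative only.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("db-client").Start(ctx, "SELECT wuser_table")
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemCassandra, // pre-built enum KeyValue from above
		semconv.DBOperationName("SELECT"),
		semconv.DBQueryText("SELECT * FROM wuser_table WHERE username = ?"),
		semconv.DBCassandraConsistencyLevelQuorum, // pre-built enum KeyValue
		semconv.DBCassandraPageSize(5000),
	)
}
```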
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the + // represents the identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// represents the identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// represents the human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It represents the deprecated use the + // `device.app.lifecycle` event definition including `android.state` as a + // payload field instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // and from which the `OS identifiers` are derived. 
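A hedged sketch of how the `deployment.environment` and `cloud.provider` attributes above might be placed on an SDK resource. The `go.opentelemetry.io/otel/sdk/resource` package and the semconv import version are assumptions for this example; they are not part of the file shown here.

```
// Illustrative sketch: tagging an SDK resource with deployment.environment
// and a cloud.provider enum value defined above. Imports are assumptions.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func newResource(env string) *resource.Resource {
	// env would typically be "staging" or "production"; per the note above,
	// deployment.environment does not affect the service.* uniqueness rules.
	return resource.NewSchemaless(
		semconv.DeploymentEnvironment(env),
		semconv.CloudProviderAWS, // pre-built enum KeyValue from above
	)
}
```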
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. 
+ EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. 
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It SHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It SHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
+func GenAiResponseID(val string) attribute.KeyValue { + return GenAiResponseIDKey.String(val) +} + +// GenAiResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// LLM a response was generated from. +func GenAiResponseModel(val string) attribute.KeyValue { + return GenAiResponseModelKey.String(val) +} + +// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the +// number of tokens used in the LLM response (completion). +func GenAiUsageCompletionTokens(val int) attribute.KeyValue { + return GenAiUsageCompletionTokensKey.Int(val) +} + +// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number +// of tokens used in the LLM prompt. +func GenAiUsagePromptTokens(val int) attribute.KeyValue { + return GenAiUsagePromptTokensKey.Int(val) +} + +// Attributes for GraphQL. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// Attributes for the Android platform on which the Android application is +// running. +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. 
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
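For context on how the host.* helpers in this vendored file are consumed, here is a minimal sketch of building an OpenTelemetry resource from them. The semconv import path and version, the use of the SDK's resource.NewSchemaless, and the host name are assumptions for illustration (the host.id value is borrowed from the Examples above); this block is illustrative only and is not part of the vendored file.

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version assumed
)

// describeHost builds a schemaless resource from a few host.* helpers.
// All concrete values are placeholders.
func describeHost() *resource.Resource {
	return resource.NewSchemaless(
		semconv.HostName("build-worker-03"),                // host.name
		semconv.HostArchAMD64,                              // host.arch (enum member, not a constructor)
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"), // host.id (value from the Examples above)
		semconv.HostCPUFamily("6"),                         // host.cpu.family
	)
}

func main() {
	fmt.Println(describeHost().Attributes())
}
```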
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the hTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. 
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents the whether the + // thread is daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. It represents the whether the +// thread is daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
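The k8s.* helpers above follow the same pattern; as a hedged sketch, they can be merged into the SDK's default resource. resource.Merge, resource.Default, and the semconv import path are assumed from the OpenTelemetry Go SDK, and the attribute values are copied from the Examples in this file.

```
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version assumed
)

// k8sResource merges a few k8s.* attributes into the SDK's default resource.
// In practice these values would come from the downward API or environment.
func k8sResource() (*resource.Resource, error) {
	return resource.Merge(
		resource.Default(),
		resource.NewSchemaless(
			semconv.K8SNamespaceName("default"),                       // k8s.namespace.name
			semconv.K8SPodName("opentelemetry-pod-autoconf"),          // k8s.pod.name
			semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"), // k8s.pod.uid
			semconv.K8SContainerName("redis"),                         // k8s.container.name
		),
	)
}

func main() {
	res, err := k8sResource()
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Attributes())
}
```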
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. 
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// This group describes attributes specific to Azure Service Bus. +const ( + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") + + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the describes the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") +) + +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. 
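For orientation when reading spans against these conventions, here is a minimal sketch of how a producer span might carry the general and Kafka-specific messaging attributes defined above, assuming the standard `go.opentelemetry.io/otel` tracing API and this semconv package (the `v1.26.0` import path, tracer name, topic, and key values are illustrative assumptions, not taken from this patch):

```
// Illustrative sketch only: attaches general and Kafka-specific messaging
// attributes from this package to a producer span. Names and values are
// placeholders; adjust the semconv import path to the vendored version.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func publishWithSpan(ctx context.Context, topic, key string, body []byte) {
	tracer := otel.Tracer("example/messaging")
	_, span := tracer.Start(ctx, "publish "+topic,
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,                // messaging.system=kafka
			semconv.MessagingOperationTypePublish,       // messaging.operation.type=publish
			semconv.MessagingOperationName("send"),      // messaging.operation.name
			semconv.MessagingDestinationName(topic),     // messaging.destination.name
			semconv.MessagingKafkaMessageKey(key),       // messaging.kafka.message.key
			semconv.MessagingMessageBodySize(len(body)), // messaging.message.body.size
		),
	)
	defer span.End()
	// ... hand the message off to the actual Kafka client here ...
}
```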
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
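Similarly, a hedged sketch of how the network attribute helpers above might annotate an existing span, again assuming the otel tracing API and this semconv package (protocol, address, and port values are illustrative assumptions):

```
// Illustrative sketch only: records connection-level network attributes
// on an existing span. Values shown are placeholders.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func recordConnection(span trace.Span, peerAddr string, peerPort int) {
	span.SetAttributes(
		semconv.NetworkTransportTCP,           // network.transport=tcp
		semconv.NetworkTypeIpv4,               // network.type=ipv4
		semconv.NetworkProtocolName("http"),   // network.protocol.name
		semconv.NetworkProtocolVersion("1.1"), // network.protocol.version
		semconv.NetworkPeerAddress(peerAddr),  // network.peer.address
		semconv.NetworkPeerPort(peerPort),     // network.peer.port
	)
}
```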
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were + // voluntary or involuntary. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents the whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. 
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. 
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
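+ //
+ // A minimal usage sketch for the process.* helpers above, with this
+ // package imported as semconv by a consumer; the annotate helper, the
+ // span value, and the literal values are illustrative assumptions, not
+ // part of the generated conventions:
+ //
+ //     import "go.opentelemetry.io/otel/trace"
+ //
+ //     // annotate is an illustrative helper; any trace.Span works here.
+ //     func annotate(span trace.Span) {
+ //         span.SetAttributes(
+ //             semconv.ProcessPID(1234),
+ //             semconv.ProcessParentPID(111),
+ //             semconv.ProcessExecutableName("otelcol"),
+ //             semconv.ProcessOwner("root"),
+ //         )
+ //     }
+ //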
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It represents the message identifier, which MUST
+ // be calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
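+ //
+ // A hedged sketch of attaching the rpc.message.* attributes above to a
+ // span event, with this package imported as semconv; the recordSent
+ // helper and its arguments are illustrative assumptions:
+ //
+ //     import "go.opentelemetry.io/otel/trace"
+ //
+ //     // recordSent is an illustrative helper for one outgoing message.
+ //     func recordSent(span trace.Span, id, size int) {
+ //         span.AddEvent("message", trace.WithAttributes(
+ //             semconv.RPCMessageTypeSent,
+ //             semconv.RPCMessageID(id),
+ //             semconv.RPCMessageUncompressedSize(size),
+ //         ))
+ //     }
+ //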
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the mUST be calculated +// as two different counters starting from `1` one for sent messages and one +// for received message. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating an UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. 
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
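+ //
+ // A hedged sketch of declaring the service.* resource attributes defined
+ // earlier in this file, with this package imported as semconv; the
+ // newResource helper is illustrative, the literal values are taken from
+ // the Examples above, and SchemaURL is assumed to be the package's schema
+ // URL constant:
+ //
+ //     import "go.opentelemetry.io/otel/sdk/resource"
+ //
+ //     // newResource is an illustrative helper.
+ //     func newResource() *resource.Resource {
+ //         return resource.NewWithAttributes(semconv.SchemaURL,
+ //             semconv.ServiceName("shoppingcart"),
+ //             semconv.ServiceNamespace("Shop"),
+ //             semconv.ServiceVersion("2.0.0"),
+ //             semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+ //         )
+ //     }
+ //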
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
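+ //
+ // A sketch of attaching the system.cpu.* attributes above to a metric
+ // measurement, with this package imported as semconv; the addCPUTime
+ // helper, the counter, and the values are illustrative assumptions:
+ //
+ //     import (
+ //         "context"
+ //
+ //         "go.opentelemetry.io/otel/metric"
+ //     )
+ //
+ //     // addCPUTime is an illustrative helper recording CPU seconds.
+ //     func addCPUTime(ctx context.Context, c metric.Float64Counter, seconds float64) {
+ //         c.Add(ctx, seconds, metric.WithAttributes(
+ //             semconv.SystemCPUStateUser,
+ //             semconv.SystemCPULogicalNumber(1),
+ //         ))
+ //     }
+ //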
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
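+//
+// A sketch of reporting filesystem usage with the system.filesystem.*
+// attributes above through an observable gauge, with this package imported
+// as semconv; the registerFSUsage helper, the instrument name, and the byte
+// count are illustrative assumptions:
+//
+//     import (
+//         "context"
+//
+//         "go.opentelemetry.io/otel/metric"
+//     )
+//
+//     // registerFSUsage is an illustrative helper.
+//     func registerFSUsage(meter metric.Meter) error {
+//         gauge, err := meter.Int64ObservableGauge("system.filesystem.usage")
+//         if err != nil {
+//             return err
+//         }
+//         _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
+//             o.ObserveInt64(gauge, 42_000_000_000, metric.WithAttributes(
+//                 semconv.SystemFilesystemMountpoint("/mnt/data"),
+//                 semconv.SystemFilesystemTypeExt4,
+//                 semconv.SystemFilesystemStateUsed,
+//             ))
+//             return nil
+//         }, gauge)
+//         return err
+//     }
+//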
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents a stateless + // protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process attributes +const ( + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +var ( + // running + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. 
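+ //
+ // A minimal sketch attaching the thread.* attributes defined just above to
+ // a span, with this package imported as semconv; the markThread helper and
+ // the span value are illustrative assumptions:
+ //
+ //     import "go.opentelemetry.io/otel/trace"
+ //
+ //     // markThread is an illustrative helper.
+ //     func markThread(span trace.Span) {
+ //         span.SetAttributes(
+ //             semconv.ThreadID(42),
+ //             semconv.ThreadName("main"),
+ //         )
+ //     }
+ //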
This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. 
It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. 
It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +var ( + // ssl + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the +// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used +// during the current connection. +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also +// exists in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. 
It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the client. This is usually mutually-exclusive of `client.certificate` since +// that value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the +// "tls.client.ja3" semantic conventions. It represents a hash that identifies +// clients based on how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientServerName returns an attribute KeyValue conforming to the +// "tls.client.server_name" semantic conventions. It represents the also called +// an SNI, this tells the server which hostname to which the client is +// attempting to connect to. 
+func TLSClientServerName(val string) attribute.KeyValue { + return TLSClientServerNameKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" +// semantic conventions. It represents the string indicating the curve used for +// the given cipher, when applicable +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string +// indicating the protocol being tunneled. Per the values in the [IANA +// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), +// this string should be lower case. +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part +// of the version parsed from the original string of the negotiated [SSL/TLS +// protocol +// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the pEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also +// exists in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by +// the server. This is usually mutually-exclusive of `server.certificate` since +// that value should be the first certificate in the chain. 
+func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished +// name of +// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of +// the issuer of the x.509 certificate presented by the client. +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Attributes describing URL. +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" + // semantic conventions. It represents the [URI + // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component + // identifying the used protocol. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name + // under the registered_domain. In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain + // contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" + // semantic conventions. It represents the low-cardinality template of an + // [absolute path + // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, is the last + // part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full` +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. 
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 000000000..d031bbea7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 000000000..bfaee0d56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 000000000..fcdb9f485 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the deprecated, use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // deprecated, use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
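
For context, here is a minimal sketch of how the generated metric constants above (name, unit, description triplets) and the attribute helper functions earlier in this vendored file might be consumed through the OpenTelemetry metrics API. The meter scope name, the measured value, and the choice of `url.domain` as an attribute are illustrative assumptions only, not something this patch adds or requires.

```go
// Illustrative sketch only: wires the generated "dns.lookup.duration"
// constants and an attribute helper from the vendored semconv package
// into an OTel histogram. Names marked as hypothetical are not from
// this patch.
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Hypothetical instrumentation scope name for the example.
	meter := otel.Meter("example/dns")

	// Name, unit, and description come from the generated constants, so the
	// instrument matches the "dns.lookup.duration" semantic convention.
	lookupDuration, err := meter.Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		log.Fatal(err)
	}

	start := time.Now()
	// ... perform a DNS lookup here ...
	lookupDuration.Record(context.Background(), time.Since(start).Seconds(),
		// url.domain is used purely to illustrate the attribute helpers
		// defined in the same package; real code would attach whatever
		// attributes its convention requires.
		metric.WithAttributes(semconv.URLDomain("opentelemetry.io")),
	)
}
```
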
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the + // number of connections that are currently upgraded (WebSockets). . + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" + KestrelUpgradedConnectionsUnit = "{connection}" + KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." + + // KestrelTLSHandshakeDuration is the metric conforming to the + // "kestrel.tls_handshake.duration" semantic conventions. It represents the + // duration of TLS handshakes on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" + KestrelTLSHandshakeDurationUnit = "s" + KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." + + // KestrelActiveTLSHandshakes is the metric conforming to the + // "kestrel.active_tls_handshakes" semantic conventions. It represents the + // number of TLS handshakes that are currently in progress on the server. + // Instrument: updowncounter + // Unit: {handshake} + // Stability: Stable + KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" + KestrelActiveTLSHandshakesUnit = "{handshake}" + KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." + + // SignalrServerConnectionDuration is the metric conforming to the + // "signalr.server.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + SignalrServerConnectionDurationName = "signalr.server.connection.duration" + SignalrServerConnectionDurationUnit = "s" + SignalrServerConnectionDurationDescription = "The duration of connections on the server." + + // SignalrServerActiveConnections is the metric conforming to the + // "signalr.server.active_connections" semantic conventions. It represents the + // number of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + SignalrServerActiveConnectionsName = "signalr.server.active_connections" + SignalrServerActiveConnectionsUnit = "{connection}" + SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" + // semantic conventions. It represents the measures the duration of the + // function's logic execution. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInvokeDurationName = "faas.invoke_duration" + FaaSInvokeDurationUnit = "s" + FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" + + // FaaSInitDuration is the metric conforming to the "faas.init_duration" + // semantic conventions. It represents the measures the duration of the + // function's initialization, such as a cold start. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSInitDurationName = "faas.init_duration" + FaaSInitDurationUnit = "s" + FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" + + // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic + // conventions. It represents the number of invocation cold starts. 
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryLimitName = "jvm.memory.limit" + JvmMemoryLimitUnit = "By" + JvmMemoryLimitDescription = "Measure of max obtainable memory." + + // JvmMemoryUsedAfterLastGc is the metric conforming to the + // "jvm.memory.used_after_last_gc" semantic conventions. It represents the + // measure of memory used, as measured after the most recent garbage collection + // event on this pool. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" + JvmMemoryUsedAfterLastGcUnit = "By" + JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." + + // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic + // conventions. It represents the duration of JVM garbage collection actions. + // Instrument: histogram + // Unit: s + // Stability: Stable + JvmGcDurationName = "jvm.gc.duration" + JvmGcDurationUnit = "s" + JvmGcDurationDescription = "Duration of JVM garbage collection actions." + + // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic + // conventions. It represents the number of executing platform threads. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Stable + JvmThreadCountName = "jvm.thread.count" + JvmThreadCountUnit = "{thread}" + JvmThreadCountDescription = "Number of executing platform threads." + + // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic + // conventions. It represents the number of classes loaded since JVM start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassLoadedName = "jvm.class.loaded" + JvmClassLoadedUnit = "{class}" + JvmClassLoadedDescription = "Number of classes loaded since JVM start." + + // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" + // semantic conventions. It represents the number of classes unloaded since JVM + // start. + // Instrument: counter + // Unit: {class} + // Stability: Stable + JvmClassUnloadedName = "jvm.class.unloaded" + JvmClassUnloadedUnit = "{class}" + JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." + + // JvmClassCount is the metric conforming to the "jvm.class.count" semantic + // conventions. It represents the number of classes currently loaded. + // Instrument: updowncounter + // Unit: {class} + // Stability: Stable + JvmClassCountName = "jvm.class.count" + JvmClassCountUnit = "{class}" + JvmClassCountDescription = "Number of classes currently loaded." + + // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic + // conventions. It represents the number of processors available to the Java + // virtual machine. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Stable + JvmCPUCountName = "jvm.cpu.count" + JvmCPUCountUnit = "{cpu}" + JvmCPUCountDescription = "Number of processors available to the Java virtual machine." + + // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic + // conventions. It represents the cPU time used by the process as reported by + // the JVM. + // Instrument: counter + // Unit: s + // Stability: Stable + JvmCPUTimeName = "jvm.cpu.time" + JvmCPUTimeUnit = "s" + JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." + + // JvmCPURecentUtilization is the metric conforming to the + // "jvm.cpu.recent_utilization" semantic conventions. 
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. 
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It represents the reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It represents the reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It represents the reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. 
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It represents the reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the unix swap or windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 000000000..4c87c7adc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 000000000..6836c6547 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 000000000..58ccaba69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..273d58e00 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. 
+ schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. 
+type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. 
+type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 000000000..8c45a7107 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. 
+func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpanInstance + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpanInstance +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..cdbf41d6d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. 
They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 000000000..7754a239e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 000000000..3e359a00b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). 
+// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..c00221e7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..ca20e9997 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. 
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpanInstance + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } + +var noopSpanInstance Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 000000000..ef85cb70c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. 
+ embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. 
+ // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. 
SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 000000000..d49adf671 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "encoding/hex" + "encoding/json" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. 
+func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. +func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. +// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. 
+type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. 
+func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 000000000..77952d2a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..dc5e34cad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,330 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiters = "," + memberDelimiter = "=" + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +type member struct { + Key string + Value string +} + +// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) +// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . 
+func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is remain part length, should be 255 in simple-key or 13 in system-id. +func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is remain part length, should be 240 exactly. +func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. +func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + +func newMember(key, value string) (member, error) { + if !checkKey(key) { + return member{}, errInvalidKey + } + if !checkValue(value) { + return member{}, errInvalidValue + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return m.Key + "=" + m.Value +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. 
It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. +func (ts TraceState) String() string { + if len(ts.list) == 0 { + return "" + } + var n int + n += len(ts.list) // member delimiters: '=' + n += len(ts.list) - 1 // list delimiters: ',' + for _, mem := range ts.list { + n += len(mem.Key) + n += len(mem.Value) + } + + var sb strings.Builder + sb.Grow(n) + _, _ = sb.WriteString(ts.list[0].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[0].Value) + for i := 1; i < len(ts.list); i++ { + _ = sb.WriteByte(listDelimiters[0]) + _, _ = sb.WriteString(ts.list[i].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[i].Value) + } + return sb.String() +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. 
+// +// If key or value are invalid according to the W3C Trace Context +// specification an error is returned with the original TraceState. +// +// If adding a new list-member means the TraceState would have more members +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. +func (ts TraceState) Insert(key, value string) (TraceState, error) { + m, err := newMember(key, value) + if err != nil { + return ts, err + } + n := len(ts.list) + found := n + for i := range ts.list { + if ts.list[i].Key == key { + found = i + } + } + cTS := TraceState{} + if found == n && n < maxListMembers { + cTS.list = make([]member, n+1) + } else { + cTS.list = make([]member, n) + } + cTS.list[0] = m + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. +func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 000000000..e57bf57fc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. 
+printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 000000000..1e87855ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! -f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 000000000..c9b7cdbbf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 000000000..78b40f3ed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. 
+func Version() string { + return "1.30.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..0c32f4fc4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,49 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +module-sets: + stable-v1: + version: v1.30.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.52.0 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/prometheus + experimental-logs: + version: v0.6.0 + modules: + - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog + experimental-schema: + version: v0.0.9 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/golang.org/x/crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 000000000..29f0a2de4 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. 
+// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. 
+// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + 
in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 000000000..10f46948d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. 
+func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 000000000..063e7784f --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 000000000..c3895478e --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,2791 @@ +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. 
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func blamkaSSE4(b *block) +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO 
X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, 
X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + 
PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, 
X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + 
PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR 
X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + 
PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, 
X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + 
PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ 
X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, 
X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 
+ PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET + +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 + +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $0x00000080, DI + +loop: + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI + JA loop + RET + +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $0x00000080, DI + +loop: + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 000000000..a481b2243 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 
2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 000000000..16d58c650 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 000000000..d2e98d429 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. 
+func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) 
+ b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 000000000..199c21d27 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build amd64 && gc && !purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 000000000..f75162e03 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,4559 @@ +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. + +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, 
Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE 
$0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + 
BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE 
$0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + 
VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, 
Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 
+ VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + RET + +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $0x80, R8 + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + +noinc: + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + BYTE $0xf9 + BYTE $0x01 + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 + VPXOR X15, X6, X6 + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, 
X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD 
$-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, 
X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + 
VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 
+ BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + 
BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + 
BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 
+ BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE 
$0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + 
VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + 
VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + 
VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + VMOVDQU X10, (AX) + VMOVDQU X11, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VZEROUPPER + RET + +DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 000000000..9a0ce2124 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,1441 @@ +// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT. 
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: SSE2, SSE4.1, SSSE3 +TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + MOVOU ·iv3<>+0(SB), X0 + MOVO X0, (R10) + XORQ CX, (R10) + MOVOU ·c40<>+0(SB), X13 + MOVOU ·c48<>+0(SB), X14 + MOVOU (AX), X12 + MOVOU 16(AX), X15 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $0x80, R8 + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ $0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR 
X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 
56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, 
X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 
+ PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ 
X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR 
X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET + +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 + +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), 
RODATA|NOPTR, $16 + +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 000000000..3168a8aa3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. +var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + 
+			v5 = bits.RotateLeft64(v5, -24)
+			v1 += m[s[9]]
+			v1 += v6
+			v12 ^= v1
+			v12 = bits.RotateLeft64(v12, -32)
+			v11 += v12
+			v6 ^= v11
+			v6 = bits.RotateLeft64(v6, -24)
+			v2 += m[s[10]]
+			v2 += v7
+			v13 ^= v2
+			v13 = bits.RotateLeft64(v13, -32)
+			v8 += v13
+			v7 ^= v8
+			v7 = bits.RotateLeft64(v7, -24)
+			v3 += m[s[11]]
+			v3 += v4
+			v14 ^= v3
+			v14 = bits.RotateLeft64(v14, -32)
+			v9 += v14
+			v4 ^= v9
+			v4 = bits.RotateLeft64(v4, -24)
+
+			v0 += m[s[12]]
+			v0 += v5
+			v15 ^= v0
+			v15 = bits.RotateLeft64(v15, -16)
+			v10 += v15
+			v5 ^= v10
+			v5 = bits.RotateLeft64(v5, -63)
+			v1 += m[s[13]]
+			v1 += v6
+			v12 ^= v1
+			v12 = bits.RotateLeft64(v12, -16)
+			v11 += v12
+			v6 ^= v11
+			v6 = bits.RotateLeft64(v6, -63)
+			v2 += m[s[14]]
+			v2 += v7
+			v13 ^= v2
+			v13 = bits.RotateLeft64(v13, -16)
+			v8 += v13
+			v7 ^= v8
+			v7 = bits.RotateLeft64(v7, -63)
+			v3 += m[s[15]]
+			v3 += v4
+			v14 ^= v3
+			v14 = bits.RotateLeft64(v14, -16)
+			v9 += v14
+			v4 ^= v9
+			v4 = bits.RotateLeft64(v4, -63)
+
+		}
+
+		h[0] ^= v0 ^ v8
+		h[1] ^= v1 ^ v9
+		h[2] ^= v2 ^ v10
+		h[3] ^= v3 ^ v11
+		h[4] ^= v4 ^ v12
+		h[5] ^= v5 ^ v13
+		h[6] ^= v6 ^ v14
+		h[7] ^= v7 ^ v15
+	}
+	c[0], c[1] = c0, c1
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
new file mode 100644
index 000000000..6e28668cd
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+
+package blake2b
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+	hashBlocksGeneric(h, c, flag, blocks)
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
new file mode 100644
index 000000000..52c414db0
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go
@@ -0,0 +1,177 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blake2b
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// XOF defines the interface to hash functions that
+// support arbitrary-length output.
+type XOF interface {
+	// Write absorbs more data into the hash's state. It panics if called
+	// after Read.
+	io.Writer
+
+	// Read reads more output from the hash. It returns io.EOF if the limit
+	// has been reached.
+	io.Reader
+
+	// Clone returns a copy of the XOF in its current state.
+	Clone() XOF
+
+	// Reset resets the XOF to its initial state.
+	Reset()
+}
+
+// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
+// the length of the output is not known in advance.
+const OutputLengthUnknown = 0
+
+// magicUnknownOutputLength is a magic value for the output size that indicates
+// an unknown number of output bytes.
+const magicUnknownOutputLength = (1 << 32) - 1
+
+// maxOutputLength is the absolute maximum number of bytes to produce when the
+// number of output bytes is unknown.
+const maxOutputLength = (1 << 32) * 64
+
+// NewXOF creates a new variable-output-length hash. The hash either produce a
+// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
+// (size == OutputLengthUnknown). In the latter case, an absolute limit of
+// 256GiB applies.
+//
+// A non-nil key turns the hash into a MAC. The key must between
+// zero and 32 bytes long.
+func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 000000000..54e446e1d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
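[Reviewer note, not part of the vendored file] The blake2x.go code above adds a variable-output-length (XOF) API on top of BLAKE2b. A minimal usage sketch, for reviewers unfamiliar with it; the input string and the 64-byte output length are arbitrary illustrations:

```go
package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Request 64 bytes of output from an unkeyed BLAKE2Xb instance.
	// Any known size in 1..2^32-2 is accepted; OutputLengthUnknown (0)
	// allows reading an a-priori-unknown amount, capped at 256 GiB.
	xof, err := blake2b.NewXOF(64, nil)
	if err != nil {
		panic(err)
	}
	xof.Write([]byte("some input")) // absorb; Write panics if called after Read
	out := make([]byte, 64)
	if _, err := io.ReadFull(xof, out); err != nil { // squeeze the 64 bytes
		panic(err)
	}
	fmt.Printf("%x\n", out)
}
```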
+ +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go index 425e8eecb..016e90215 100644 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -11,7 +11,7 @@ // Deprecated: any new system should use AES (from crypto/aes, if necessary in // an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from // golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" +package cast5 import ( "errors" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go new file mode 100644 index 000000000..661ea132e --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s new file mode 100644 index 000000000..7dd2638e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -0,0 +1,307 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
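[Reviewer note, not part of the vendored file] The register.go file above wires BLAKE2b into the standard library's crypto registry at init time, so a blank import is enough to make crypto.BLAKE2b_256/384/512 available. A minimal sketch of what that enables (illustrative only):

```go
package main

import (
	"crypto"
	"fmt"

	// Blank import: the package's init() registers BLAKE2b-256/384/512
	// with the crypto package, as shown in register.go above.
	_ "golang.org/x/crypto/blake2b"
)

func main() {
	if !crypto.BLAKE2b_512.Available() {
		panic("BLAKE2b-512 not registered")
	}
	h := crypto.BLAKE2b_512.New() // unkeyed BLAKE2b-512 from the registered constructor
	h.Write([]byte("some input"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```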
+ +//go:build gc && !purego + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, 
V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go new file mode 100644 index 000000000..93eb5ae6d --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -0,0 +1,398 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms +// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/internal/alias" +) + +const ( + // KeySize is the size of the key used by this cipher, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // cipher, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20 variant of + // this cipher, in bytes. + NonceSizeX = 24 +) + +// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter + // (incremented after each block), and 3 of nonce. + key [8]uint32 + counter uint32 + nonce [3]uint32 + + // The last len bytes of buf are leftover key stream bytes from the previous + // XORKeyStream invocation. The size of buf depends on how many blocks are + // computed at a time by xorKeyStreamBlocks. + buf [bufSize]byte + len int + + // overflow is set when the counter overflowed, no more blocks can be + // generated, and the next XORKeyStream call should panic. + overflow bool + + // The counter-independent results of the first round are cached after they + // are computed the first time. 
+ precompDone bool + p1, p5, p9, p13 uint32 + p2, p6, p10, p14 uint32 + p3, p7, p11, p15 uint32 +} + +var _ cipher.Stream = (*Cipher)(nil) + +// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given +// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, +// the XChaCha20 construction will be used. It returns an error if key or nonce +// have any other length. +// +// Note that ChaCha20, like all stream ciphers, is not authenticated and allows +// attackers to silently tamper with the plaintext. For this reason, it is more +// appropriate as a building block than as a standalone encryption mechanism. +// Instead, consider using package golang.org/x/crypto/chacha20poly1305. +func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { + // This function is split into a wrapper so that the Cipher allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + c := &Cipher{} + return newUnauthenticatedCipher(c, key, nonce) +} + +func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong key size") + } + if len(nonce) == NonceSizeX { + // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a + // derived key, allowing it to operate on a nonce of 24 bytes. See + // draft-irtf-cfrg-xchacha-01, Section 2.3. + key, _ = HChaCha20(key, nonce[0:16]) + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + nonce = cNonce + } else if len(nonce) != NonceSize { + return nil, errors.New("chacha20: wrong nonce size") + } + + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint + c.key = [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + } + c.nonce = [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + } + return c, nil +} + +// The constant first 4 words of the ChaCha20 state. +const ( + j0 uint32 = 0x61707865 // expa + j1 uint32 = 0x3320646e // nd 3 + j2 uint32 = 0x79622d32 // 2-by + j3 uint32 = 0x6b206574 // te k +) + +const blockSize = 64 + +// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. +// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 +// words each round, in columnar or diagonal groups of 4 at a time. +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = bits.RotateLeft32(d, 16) + c += d + b ^= c + b = bits.RotateLeft32(b, 12) + a += b + d ^= a + d = bits.RotateLeft32(d, 8) + c += d + b ^= c + b = bits.RotateLeft32(b, 7) + return a, b, c, d +} + +// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will +// behave as if (64 * counter) bytes had been encrypted so far. +// +// To prevent accidental counter reuse, SetCounter panics if counter is less +// than the current value. +// +// Note that the execution time of XORKeyStream is not independent of the +// counter value. +func (s *Cipher) SetCounter(counter uint32) { + // Internally, s may buffer multiple blocks, which complicates this + // implementation slightly. 
When checking whether the counter has rolled + // back, we must use both s.counter and s.len to determine how many blocks + // we have already output. + outputCounter := s.counter - uint32(s.len)/blockSize + if s.overflow || counter < outputCounter { + panic("chacha20: SetCounter attempted to rollback counter") + } + + // In the general case, we set the new counter value and reset s.len to 0, + // causing the next call to XORKeyStream to refill the buffer. However, if + // we're advancing within the existing buffer, we can save work by simply + // setting s.len. + if counter < s.counter { + s.len = int(s.counter-counter) * blockSize + } else { + s.counter = counter + s.len = 0 + } +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(src) == 0 { + return + } + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + dst = dst[:len(src)] + if alias.InexactOverlap(dst, src) { + panic("chacha20: invalid buffer overlap") + } + + // First, drain any remaining key stream from a previous XORKeyStream. + if s.len != 0 { + keyStream := s.buf[bufSize-s.len:] + if len(src) < len(keyStream) { + keyStream = keyStream[:len(src)] + } + _ = src[len(keyStream)-1] // bounds check elimination hint + for i, b := range keyStream { + dst[i] = src[i] ^ b + } + s.len -= len(keyStream) + dst, src = dst[len(keyStream):], src[len(keyStream):] + } + if len(src) == 0 { + return + } + + // If we'd need to let the counter overflow and keep generating output, + // panic immediately. If instead we'd only reach the last block, remember + // not to generate any more output after the buffer is drained. + numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize + if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { + panic("chacha20: counter overflow") + } else if uint64(s.counter)+numBlocks == 1<<32 { + s.overflow = true + } + + // xorKeyStreamBlocks implementations expect input lengths that are a + // multiple of bufSize. Platform-specific ones process multiple blocks at a + // time, so have bufSizes that are a multiple of blockSize. + + full := len(src) - len(src)%bufSize + if full > 0 { + s.xorKeyStreamBlocks(dst[:full], src[:full]) + } + dst, src = dst[full:], src[full:] + + // If using a multi-block xorKeyStreamBlocks would overflow, use the generic + // one that does one block at a time. + const blocksPerBuf = bufSize / blockSize + if uint64(s.counter)+blocksPerBuf > 1<<32 { + s.buf = [bufSize]byte{} + numBlocks := (len(src) + blockSize - 1) / blockSize + buf := s.buf[bufSize-numBlocks*blockSize:] + copy(buf, src) + s.xorKeyStreamBlocksGeneric(buf, buf) + s.len = len(buf) - copy(dst, buf) + return + } + + // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and + // keep the leftover keystream for the next XORKeyStream invocation. 
+ if len(src) > 0 { + s.buf = [bufSize]byte{} + copy(s.buf[:], src) + s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) + s.len = bufSize - copy(dst, s.buf[:]) + } +} + +func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { + if len(dst) != len(src) || len(dst)%blockSize != 0 { + panic("chacha20: internal error: wrong dst and/or src length") + } + + // To generate each block of key stream, the initial cipher state + // (represented below) is passed through 20 rounds of shuffling, + // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) + // or by diagonals (like 1, 6, 11, 12). + // + // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc + // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk + // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk + // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn + // + // c=constant k=key b=blockcount n=nonce + var ( + c0, c1, c2, c3 = j0, j1, j2, j3 + c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] + c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] + _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] + ) + + // Three quarters of the first round don't depend on the counter, so we can + // calculate them here, and reuse them for multiple blocks in the loop, and + // for future XORKeyStream invocations. + if !s.precompDone { + s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) + s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) + s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) + s.precompDone = true + } + + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { + // The remainder of the first column round. + fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) + + // The second diagonal round. + x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) + x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) + x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) + x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) + + // The remaining 18 rounds. + for i := 0; i < 9; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. + addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) + + s.counter += 1 + + src, dst = src[blockSize:], dst[blockSize:] + } +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes +// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other +// length. It is used as part of the XChaCha20 construction. +func HChaCha20(key, nonce []byte) ([]byte, error) { + // This function is split into a wrapper so that the slice allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + out := make([]byte, 32) + return hChaCha20(out, key, nonce) +} + +func hChaCha20(out, key, nonce []byte) ([]byte, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong HChaCha20 key size") + } + if len(nonce) != 16 { + return nil, errors.New("chacha20: wrong HChaCha20 nonce size") + } + + x0, x1, x2, x3 := j0, j1, j2, j3 + x4 := binary.LittleEndian.Uint32(key[0:4]) + x5 := binary.LittleEndian.Uint32(key[4:8]) + x6 := binary.LittleEndian.Uint32(key[8:12]) + x7 := binary.LittleEndian.Uint32(key[12:16]) + x8 := binary.LittleEndian.Uint32(key[16:20]) + x9 := binary.LittleEndian.Uint32(key[20:24]) + x10 := binary.LittleEndian.Uint32(key[24:28]) + x11 := binary.LittleEndian.Uint32(key[28:32]) + x12 := binary.LittleEndian.Uint32(nonce[0:4]) + x13 := binary.LittleEndian.Uint32(nonce[4:8]) + x14 := binary.LittleEndian.Uint32(nonce[8:12]) + x15 := binary.LittleEndian.Uint32(nonce[12:16]) + + for i := 0; i < 10; i++ { + // Diagonal round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Column round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + _ = out[31] // bounds check elimination hint + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x12) + binary.LittleEndian.PutUint32(out[20:24], x13) + binary.LittleEndian.PutUint32(out[24:28], x14) + binary.LittleEndian.PutUint32(out[28:32], x15) + return out, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go new file mode 100644 index 000000000..db42e6676 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego + +package chacha20 + +const bufSize = blockSize + +func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { + s.xorKeyStreamBlocksGeneric(dst, src) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go new file mode 100644 index 000000000..3a4287f99 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
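[Reviewer note, not part of the vendored file] chacha_generic.go above is the portable core behind the platform assembly back ends that follow. A minimal encrypt/decrypt sketch of its exported API (illustrative only: the all-zero key and nonce are placeholders, and passing a 24-byte nonce would select the XChaCha20 variant via HChaCha20):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)     // 32-byte placeholder key
	nonce := make([]byte, chacha20.NonceSize) // 12-byte nonce; 24 bytes selects XChaCha20

	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("attack at dawn")
	ciphertext := make([]byte, len(plaintext))
	// Two XORKeyStream calls behave like one call over the concatenated input.
	c.XORKeyStream(ciphertext[:6], plaintext[:6])
	c.XORKeyStream(ciphertext[6:], plaintext[6:])

	// Decrypt by recreating the cipher with the same key and nonce.
	d, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
	recovered := make([]byte, len(ciphertext))
	d.XORKeyStream(recovered, ciphertext)
	fmt.Println(bytes.Equal(recovered, plaintext)) // true
}
```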
+ +//go:build gc && !purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s new file mode 100644 index 000000000..c672ccf69 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -0,0 +1,443 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Code for the perl script that generates the ppc64 assembler +// can be found in the cryptogams repository at the link below. It is based on +// the original from openssl. + +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 + +// The differences in this and the original implementation are +// due to the calling conventions and initialization of constants. + +//go:build gc && !purego + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 +#define TMP R15 + +#define CONSTBASE R16 +#define BLOCKS R17 + +// for VPERMXOR +#define MASK R18 + +DATA consts<>+0x00(SB)/8, $0x3320646e61707865 +DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 +DATA consts<>+0x10(SB)/8, $0x0000000000000001 +DATA consts<>+0x18(SB)/8, $0x0000000000000000 +DATA consts<>+0x20(SB)/8, $0x0000000000000004 +DATA consts<>+0x28(SB)/8, $0x0000000000000000 +DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA consts<>+0x38(SB)/8, $0x0203000106070405 +DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA consts<>+0x48(SB)/8, $0x0102030005060704 +DATA consts<>+0x50(SB)/8, $0x6170786561707865 +DATA consts<>+0x58(SB)/8, $0x6170786561707865 +DATA consts<>+0x60(SB)/8, $0x3320646e3320646e +DATA consts<>+0x68(SB)/8, $0x3320646e3320646e +DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 +DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 +DATA consts<>+0x90(SB)/8, $0x0000000100000000 +DATA consts<>+0x98(SB)/8, $0x0000000300000002 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 + +//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) +TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + // Addressing for constants + MOVD $consts<>+0x00(SB), CONSTBASE + MOVD $16, R8 + MOVD $32, R9 + MOVD $48, R10 + MOVD $64, R11 + SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 + // V16 + LXVW4X (CONSTBASE)(R0), VS48 + ADD $80,CONSTBASE + + // Load key into V17,V18 + LXVW4X (KEY)(R0), VS49 + LXVW4X 
(KEY)(R8), VS50 + + // Load CNT, NONCE into V19 + LXVW4X (CNT)(R0), VS51 + + // Clear V27 + VXOR V27, V27, V27 + + // V28 + LXVW4X (CONSTBASE)(R11), VS60 + + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + + // splat slot from V19 -> V26 + VSPLTW $0, V19, V26 + + VSLDOI $4, V19, V27, V19 + VSLDOI $12, V27, V19, V19 + + VADDUWM V26, V28, V26 + + MOVD $10, R14 + MOVD R14, CTR + PCALIGN $16 +loop_outer_vsx: + // V0, V1, V2, V3 + LXVW4X (R0)(CONSTBASE), VS32 + LXVW4X (R8)(CONSTBASE), VS33 + LXVW4X (R9)(CONSTBASE), VS34 + LXVW4X (R10)(CONSTBASE), VS35 + + // splat values from V17, V18 into V4-V11 + VSPLTW $0, V17, V4 + VSPLTW $1, V17, V5 + VSPLTW $2, V17, V6 + VSPLTW $3, V17, V7 + VSPLTW $0, V18, V8 + VSPLTW $1, V18, V9 + VSPLTW $2, V18, V10 + VSPLTW $3, V18, V11 + + // VOR + VOR V26, V26, V12 + + // splat values from V19 -> V13, V14, V15 + VSPLTW $1, V19, V13 + VSPLTW $2, V19, V14 + VSPLTW $3, V19, V15 + + // splat const values + VSPLTISW $-16, V27 + VSPLTISW $12, V28 + VSPLTISW $8, V29 + VSPLTISW $7, V30 + PCALIGN $16 +loop_vsx: + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V28, V4 + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + + VADDUWM V0, V4, V0 + VADDUWM V1, V5, V1 + VADDUWM V2, V6, V2 + VADDUWM V3, V7, V3 + + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 + + VADDUWM V8, V12, V8 + VADDUWM V9, V13, V9 + VADDUWM V10, V14, V10 + VADDUWM V11, V15, V11 + + VXOR V4, V8, V4 + VXOR V5, V9, V5 + VXOR V6, V10, V6 + VXOR V7, V11, V7 + + VRLW V4, V30, V4 + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V28, V5 + VRLW V6, V28, V6 + VRLW V7, V28, V7 + VRLW V4, V28, V4 + + VADDUWM V0, V5, V0 + VADDUWM V1, V6, V1 + VADDUWM V2, V7, V2 + VADDUWM V3, V4, V3 + + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, V20, V14 + + VADDUWM V10, V15, V10 + VADDUWM V11, V12, V11 + VADDUWM V8, V13, V8 + VADDUWM V9, V14, V9 + + VXOR V5, V10, V5 + VXOR V6, V11, V6 + VXOR V7, V8, V7 + VXOR V4, V9, V4 + + VRLW V5, V30, V5 + VRLW V6, V30, V6 + VRLW V7, V30, V7 + VRLW V4, V30, V4 + BDNZ loop_vsx + + VADDUWM V12, V26, V12 + + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 + + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 + + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 + + XXPERMDI VS32, VS34, $0, VS33 + XXPERMDI VS32, VS34, $3, VS35 + XXPERMDI VS59, VS60, $0, VS32 + XXPERMDI VS59, VS60, $3, VS34 + + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 + + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 + + XXPERMDI VS36, VS38, $0, VS37 + XXPERMDI VS36, VS38, $3, VS39 + XXPERMDI VS61, VS62, $0, VS36 + XXPERMDI VS61, VS62, $3, VS38 + + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 + + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 + + XXPERMDI VS40, VS42, $0, VS41 + XXPERMDI VS40, 
VS42, $3, VS43 + XXPERMDI VS59, VS60, $0, VS40 + XXPERMDI VS59, VS60, $3, VS42 + + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 + + VSPLTISW $4, V27 + VADDUWM V26, V27, V26 + + XXPERMDI VS44, VS46, $0, VS45 + XXPERMDI VS44, VS46, $3, VS47 + XXPERMDI VS61, VS62, $0, VS44 + XXPERMDI VS61, VS62, $3, VS46 + + VADDUWM V0, V16, V0 + VADDUWM V4, V17, V4 + VADDUWM V8, V18, V8 + VADDUWM V12, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + // Bottom of loop + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V1, V16, V0 + VADDUWM V5, V17, V4 + VADDUWM V9, V18, V8 + VADDUWM V13, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + VXOR V27, V0, V27 + + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(V10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V2, V16, V0 + VADDUWM V6, V17, V4 + VADDUWM V10, V18, V8 + VADDUWM V14, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + BEQ done_vsx + + VADDUWM V3, V16, V0 + VADDUWM V7, V17, V4 + VADDUWM V11, V18, V8 + VADDUWM V15, V19, V12 + + CMPU LEN, $64 + BLT tail_vsx + + LXVW4X (INP)(R0), VS59 + LXVW4X (INP)(R8), VS60 + LXVW4X (INP)(R9), VS61 + LXVW4X (INP)(R10), VS62 + + VXOR V27, V0, V27 + VXOR V28, V4, V28 + VXOR V29, V8, V29 + VXOR V30, V12, V30 + + STXVW4X VS59, (OUT)(R0) + STXVW4X VS60, (OUT)(R8) + ADD $64, INP + STXVW4X VS61, (OUT)(R9) + ADD $-64, LEN + STXVW4X VS62, (OUT)(R10) + ADD $64, OUT + + MOVD $10, R14 + MOVD R14, CTR + BNE loop_outer_vsx + +done_vsx: + // Increment counter by number of 64 byte blocks + MOVD (CNT), R14 + ADD BLOCKS, R14 + MOVD R14, (CNT) + RET + +tail_vsx: + ADD $32, R1, R11 + MOVD LEN, CTR + + // Save values on stack to copy from + STXVW4X VS32, (R11)(R0) + STXVW4X VS36, (R11)(R8) + STXVW4X VS40, (R11)(R9) + STXVW4X VS44, (R11)(R10) + ADD $-1, R11, R12 + ADD $-1, INP + ADD $-1, OUT + PCALIGN $16 +looptail_vsx: + // Copying the result to OUT + // in bytes. + MOVBZU 1(R12), KEY + MOVBZU 1(INP), TMP + XOR KEY, TMP, KEY + MOVBU KEY, 1(OUT) + BDNZ looptail_vsx + + // Clear the stack values + STXVW4X VS48, (R11)(R0) + STXVW4X VS48, (R11)(R8) + STXVW4X VS48, (R11)(R9) + STXVW4X VS48, (R11)(R10) + BR done_vsx diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go new file mode 100644 index 000000000..683ccfd1c --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego + +package chacha20 + +import "golang.org/x/sys/cpu" + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. Implementation in asm_s390x.s. +// +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + if cpu.S390X.HasVX { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } else { + c.xorKeyStreamBlocksGeneric(dst, src) + } +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s new file mode 100644 index 000000000..1eda91a3d --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -0,0 +1,224 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. + +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ + VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, 
v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // setup + MOVD $95, R0 + VLM (R5), KEY0, KEY1 + VLL R0, (R6), NONCE + VZERO M0 + VLEIB $7, $32, M0 + VSRLB M0, NONCE, NONCE + + // initialize counter values + VLREPF (R7), CTR + VZERO INC + VLEIF $1, $1, INC + VLEIF $2, $2, INC + VLEIF $3, $3, INC + VAF INC, CTR, CTR + VREPIF $4, INC + +chacha: + VREPF $0, J0, X0 + VREPF $1, J0, X1 + VREPF $2, J0, X2 + VREPF $3, J0, X3 + VREPF $0, KEY0, X4 + VREPF $1, KEY0, X5 + VREPF $2, KEY0, X6 + VREPF $3, KEY0, X7 + VREPF $0, KEY1, X8 + VREPF $1, KEY1, X9 + VREPF $2, KEY1, X10 + VREPF $3, KEY1, X11 + VLR CTR, X12 + VREPF $1, NONCE, X13 + VREPF $2, NONCE, X14 + VREPF $3, NONCE, X15 + + MOVD $(NUM_ROUNDS/2), R1 + +loop: + ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) + ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) + + ADD $-1, R1 + BNE loop + + // decrement length + ADD $-256, R4 + + // rearrange vectors + SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) + ADDV(J0, X0, X1, X2, X3) + SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) + ADDV(KEY0, X4, X5, X6, X7) + SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) + ADDV(KEY1, X8, X9, X10, X11) + VAF CTR, X12, X12 + SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) + ADDV(NONCE, X12, X13, X14, X15) + + // increment counters + VAF INC, CTR, CTR + + // xor keystream with plaintext + XORV(0*64, R2, R3, X0, X4, X8, X12) + XORV(1*64, R2, R3, X1, X5, X9, X13) + XORV(2*64, R2, R3, X2, X6, X10, X14) + XORV(3*64, R2, R3, X3, X7, X11, X15) + + // increment pointers + MOVD $256(R2), R2 + MOVD $256(R3), R3 + + CMPBNE R4, $0, chacha + + VSTEF $0, CTR, (R7) + RET diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go new file mode 100644 index 000000000..c2d04851e --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/xor.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found src the LICENSE file. + +package chacha20 + +import "runtime" + +// Platforms that have fast unaligned 32-bit little endian accesses. +const unaligned = runtime.GOARCH == "386" || + runtime.GOARCH == "amd64" || + runtime.GOARCH == "arm64" || + runtime.GOARCH == "ppc64le" || + runtime.GOARCH == "s390x" + +// addXor reads a little endian uint32 from src, XORs it with (a + b) and +// places the result in little endian byte order in dst. 
+func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint + if unaligned { + // The compiler should optimize this code into + // 32-bit unaligned little endian loads and stores. + // TODO: delete once the compiler does a reliably + // good job with the generic code below. + // See issue #25111 for more details. + v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= a + b + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) + } +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 000000000..8cf5d8112 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,98 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its +// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and +// draft-irtf-cfrg-xchacha-01. +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 + + // Overhead is the size of the Poly1305 authentication tag, and the + // difference between a ciphertext length and its plaintext. + Overhead = 16 +) + +type chacha20poly1305 struct { + key [KeySize]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return Overhead +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. 
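[Reviewer note, not part of the vendored file] For context on the chacha20poly1305 package introduced above, a minimal Seal/Open round trip (illustrative only: the zero key and nonce are placeholders, and in real use a nonce must never repeat under the same key):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)     // placeholder 32-byte key
	nonce := make([]byte, chacha20poly1305.NonceSize) // placeholder 12-byte nonce

	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("attack at dawn")
	ad := []byte("header") // additional data is authenticated but not encrypted

	sealed := aead.Seal(nil, nonce, plaintext, ad) // ciphertext || 16-byte Poly1305 tag
	opened, err := aead.Open(nil, nonce, sealed, ad)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(opened)) // attack at dawn
}
```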
It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 000000000..50695a14f --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/alias" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. See +// https://tools.ietf.org/html/rfc7539#section-2.3. +func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = binary.LittleEndian.Uint32(key[0:4]) + state[5] = binary.LittleEndian.Uint32(key[4:8]) + state[6] = binary.LittleEndian.Uint32(key[8:12]) + state[7] = binary.LittleEndian.Uint32(key[12:16]) + state[8] = binary.LittleEndian.Uint32(key[16:20]) + state[9] = binary.LittleEndian.Uint32(key[20:24]) + state[10] = binary.LittleEndian.Uint32(key[24:28]) + state[11] = binary.LittleEndian.Uint32(key[28:32]) + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[0:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s new file mode 100644 index 000000000..fd5ee845f --- /dev/null +++ 
b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -0,0 +1,9762 @@ +// Code generated by command: go run chacha20poly1305_amd64_asm.go -out ../chacha20poly1305_amd64.s -pkg chacha20poly1305. DO NOT EDIT. + +//go:build gc && !purego + +#include "textflag.h" + +// func polyHashADInternal<>() +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // Hack: Must declare #define macros inside of a function due to Avo constraints + // ROL rotates the uint32s in register R left by N bits, using temporary T. + #define ROL(N, R, T) \ + MOVO R, T; \ + PSLLL $(N), T; \ + PSRLL $(32-(N)), R; \ + PXOR T, R + + // ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL8(R, T) PSHUFB ·rol8<>(SB), R + #else + #define ROL8(R, T) ROL(8, R, T) + #endif + + // ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL16(R, T) PSHUFB ·rol16<>(SB), R + #else + #define ROL16(R, T) ROL(16, R, T) + #endif + XORQ R10, R10 + XORQ R11, R11 + XORQ R12, R12 + CMPQ R9, $0x0d + JNE hashADLoop + MOVQ (CX), R10 + MOVQ 5(CX), R11 + SHRQ $0x18, R11 + MOVQ $0x00000001, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ R9, $0x10 + JB hashADTail + ADDQ (CX), R10 + ADCQ 8(CX), R11 + ADCQ $0x01, R12 + LEAQ 16(CX), CX + SUBQ $0x10, R9 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP hashADLoop + +hashADTail: + CMPQ R9, $0x00 + JE hashADDone + + // Hash last < 16 byte tail + XORQ R13, R13 + XORQ R14, R14 + XORQ R15, R15 + ADDQ R9, CX + +hashADTailLoop: + SHLQ $0x08, R13, R14 + SHLQ $0x08, R13 + MOVB -1(CX), R15 + XORQ R15, R13 + DECQ CX + DECQ R9 + JNE hashADTailLoop + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + +hashADDone: + RET + +// func chacha20Poly1305Open(dst []byte, key []uint32, src 
[]byte, ad []byte) bool +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Open(SB), $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $0x20, BP + ANDQ $-32, BP + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + + // Check for AVX2 support + CMPB ·useAVX2+0(SB), $0x01 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ BX, $0x80 + JBE openSSE128 + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X9, X13 + + // Store state on stack for future use + MOVO X3, 32(BP) + MOVO X6, 48(BP) + MOVO X9, 128(BP) + MOVQ $0x0000000a, R9 + +openSSEPreparePolyKey: + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + DECQ R9 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + + // Clamp and store the key + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) + + // Hash AAD + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ BX, $0x00000100 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 + + // Store counters + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash + // 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $0x00000004, CX + MOVQ SI, R9 + +openSSEInternalLoop: + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO 
X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(R9), R9 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ 
R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ CX + JGE openSSEInternalLoop + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + CMPQ CX, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + + // Load - xor - store + MOVO X15, 64(BP) + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU X0, (DI) + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU X3, 16(DI) + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU X6, 32(DI) + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X9, 48(DI) + MOVOU 64(SI), X9 + PXOR X9, X1 + MOVOU X1, 64(DI) + MOVOU 80(SI), X9 + PXOR X9, X4 + MOVOU X4, 80(DI) + MOVOU 96(SI), X9 + PXOR X9, X7 + MOVOU X7, 96(DI) + MOVOU 112(SI), X9 + PXOR X9, X10 + MOVOU X10, 112(DI) + MOVOU 128(SI), X9 + PXOR X9, X2 + MOVOU X2, 128(DI) + MOVOU 144(SI), X9 + PXOR X9, X5 + MOVOU X5, 144(DI) + MOVOU 160(SI), X9 + PXOR X9, X8 + MOVOU X8, 160(DI) + MOVOU 176(SI), X9 + PXOR X9, X11 + MOVOU X11, 176(DI) + MOVOU 192(SI), X9 + PXOR X9, X12 + MOVOU X12, 192(DI) + MOVOU 208(SI), X9 + PXOR X9, X13 + MOVOU X13, 208(DI) + MOVOU 224(SI), X9 + PXOR X9, X14 + MOVOU X14, 224(DI) + MOVOU 240(SI), X9 + PXOR 64(BP), X9 + MOVOU X9, 240(DI) + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ BX, BX + JE openSSEFinalize + CMPQ BX, $0x40 + JBE openSSETail64 + CMPQ BX, $0x80 + JBE openSSETail128 + CMPQ BX, $0xc0 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ 
ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Final reduce + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 + + // Add in the "s" part of the key + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $0x00000001, DX + XORQ (SI), R10 + XORQ 8(SI), R11 + ORQ R11, R10 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +openSSE128: + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 + +openSSE128InnerCipherLoop: + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + 
PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + + // Clamp and store the key + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) + + // Hash + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ BX, $0x10 + JB openSSETail16 + SUBQ $0x10, BX + + // Load for hashing + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + + // Load for decryption + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Shift the stream "left" + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 + JMP openSSE128Open + +openSSETail16: + TESTQ BX, BX + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVOU (SI), X12 + ADDQ BX, SI + PAND -16(R13)(R9*1), X12 + MOVO X12, 64(BP) + MOVQ X12, R13 + MOVQ 72(BP), R14 + PXOR X1, X12 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ X12, R8 + MOVB R8, (DI) + PSRLDQ $0x01, X12 + INCQ DI + DECQ BX + JNE openSSETail16Store + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + 
ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP openSSEFinalize + +openSSETail64: + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + XORQ R9, R9 + MOVQ BX, CX + CMPQ CX, $0x10 + JB openSSETail64LoopB + +openSSETail64LoopA: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + +openSSETail64LoopB: + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + CMPQ CX, $0x10 + JAE openSSETail64LoopA + CMPQ R9, $0xa0 + JNE openSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + PADDL 48(BP), X6 + PADDL 80(BP), X9 + +openSSETail64DecLoop: + CMPQ BX, $0x10 + JB openSSETail64DecLoopDone + SUBQ $0x10, BX + MOVOU (SI), X12 + PXOR X12, X0 + MOVOU X0, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVO X3, X0 + MOVO X6, X3 + MOVO X9, X6 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO X0, X1 + JMP openSSETail16 + +openSSETail128: + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 96(BP) + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + +openSSETail128LoopA: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ 
DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + +openSSETail128LoopB: + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + CMPQ R9, CX + JB openSSETail128LoopA + CMPQ R9, $0xa0 + JNE openSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 96(BP), X9 + PADDL 80(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + SUBQ $0x40, BX + LEAQ 64(SI), SI + LEAQ 64(DI), DI + JMP openSSETail64DecLoop + +openSSETail192: + MOVO ·chacha20Constants<>+0(SB), X2 + MOVO 32(BP), X5 + MOVO 48(BP), X8 + MOVO 128(BP), X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 80(BP) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 112(BP) + MOVQ BX, CX + MOVQ $0x000000a0, R9 + CMPQ CX, $0xa0 + CMOVQGT 
R9, CX + ANDQ $-16, CX + XORQ R9, R9 + +openSSLTail192LoopA: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + +openSSLTail192LoopB: + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE 
$0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + CMPQ R9, CX + JB openSSLTail192LoopA + CMPQ R9, $0xa0 + JNE openSSLTail192LoopB + CMPQ BX, $0xb0 + JB openSSLTail192Store + ADDQ 160(SI), R10 + ADCQ 168(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + CMPQ BX, $0xc0 + JB openSSLTail192Store + ADDQ 176(SI), R10 + ADCQ 184(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + +openSSLTail192Store: + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 112(BP), X9 + PADDL 96(BP), X10 + PADDL 80(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X2 + PXOR X13, X5 + PXOR X14, X8 + PXOR X15, X11 + MOVOU X2, (DI) + MOVOU X5, 16(DI) + MOVOU X8, 32(DI) + MOVOU X11, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + LEAQ 128(DI), DI + JMP openSSETail64DecLoop + +openSSETail256: + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 + + // Store counters + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + XORQ R9, R9 + +openSSETail256Loop: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, 
X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, 
X13 + PXOR X7, X13 + MOVO 64(BP), X7 + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + ADDQ $0x10, R9 + CMPQ R9, $0xa0 + JB openSSETail256Loop + MOVQ BX, CX + ANDQ $-16, CX + +openSSETail256HashLoop: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, R9 + CMPQ R9, CX + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) + + // Load - xor - store + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + LEAQ 192(SI), SI + LEAQ 192(DI), DI + SUBQ $0xc0, BX + MOVO X12, X0 + MOVO X13, X3 + MOVO X14, X6 + MOVO 64(BP), X9 + JMP openSSETail64DecLoop + +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + 
BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 + + // Special optimization, for very short buffers + CMPQ BX, $0xc0 + JBE openAVX2192 + CMPQ BX, $0x00000140 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, 64(BP) + VMOVDQA Y4, 192(BP) + MOVQ $0x0000000a, R9 + +openAVX2PreparePolyKey: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ R9 + JNE openAVX2PreparePolyKey + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD 32(BP), Y14, Y14 + VPADDD 64(BP), Y12, Y12 + VPADDD 192(BP), Y4, Y4 + VPERM2I128 $0x02, Y0, Y14, Y3 + + // Clamp and store poly key + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) + + // Stream for the first 64 bytes + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + XORQ CX, CX + +openAVX2InitialHash64: + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, CX + CMPQ CX, $0x40 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VMOVDQU Y0, (DI) + VMOVDQU Y14, 32(DI) + LEAQ 64(SI), SI + LEAQ 64(DI), DI + SUBQ $0x40, BX + +openAVX2MainLoop: + CMPQ BX, $0x00000200 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + +openAVX2InternalLoop: + 
ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(SI)(CX*1), R10 + ADCQ 24(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, 
Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(SI)(CX*1), R10 + ADCQ 40(SI)(CX*1), R11 + ADCQ $0x01, R12 + LEAQ 48(CX), CX + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + CMPQ CX, $0x000001e0 + JNE openAVX2InternalLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + ADDQ 480(SI), R10 + ADCQ 488(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 
96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + + // and here + ADDQ 496(SI), R10 + ADCQ 504(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + LEAQ 512(DI), DI + SUBQ $0x00000200, BX + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ BX, BX + JE openSSEFinalize + CMPQ BX, $0x80 + JBE openAVX2Tail128 + CMPQ BX, $0x00000100 + JBE openAVX2Tail256 + CMPQ BX, $0x00000180 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +openAVX2192: + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 + +openAVX2192InnerCipherLoop: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD 
Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 + JNE openAVX2192InnerCipherLoop + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + + // Clamp and store poly key + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) + + // Stream for up to 192 bytes + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ BX, $0x20 + JB openAVX2ShortTail32 + SUBQ $0x20, BX + + // Load for hashing + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(SI), R10 + ADCQ 24(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Load for decryption + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + + // Shift stream left + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ BX, $0x10 + VMOVDQA X0, X1 + JB openAVX2ShortDone + SUBQ $0x10, BX + + // Load for hashing + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Load for decryption + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 
16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +openAVX2320: + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 + +openAVX2320InnerCipherLoop: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 + JNE openAVX2320InnerCipherLoop + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, 
Y15 + VPADDD Y15, Y2, Y2 + + // Clamp and store poly key + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) + + // Stream for up to 320 bytes + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 + JMP openAVX2ShortOpen + +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y1 + VMOVDQA Y1, Y4 + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + TESTQ CX, CX + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + +openAVX2Tail128LoopB: + ADDQ $0x10, R9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail128LoopA + CMPQ R9, $0xa0 + JNE openAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y13, Y13 + VPADDD Y4, Y1, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + +openAVX2TailLoop: + CMPQ BX, $0x20 + JB openAVX2Tail + SUBQ $0x20, BX + + // Load for decryption + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ BX, $0x10 + VMOVDQA X0, X1 + JB openAVX2TailDone + SUBQ $0x10, BX + + // Load for decryption + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +openAVX2Tail256: + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + 
VMOVDQA Y1, Y11 + + // Compute the number of iterations that will hash data + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x80, CX + SHRQ $0x04, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + +openAVX2Tail256LoopA: + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + +openAVX2Tail256LoopB: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail256LoopA + CMPQ R9, $0x0a + JNE openAVX2Tail256LoopB + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX + +openAVX2Tail256Hash: + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail256HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP 
openAVX2Tail256Hash + +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y6 + VPERM2I128 $0x02, Y12, Y4, Y10 + VPERM2I128 $0x13, Y0, Y14, Y8 + VPERM2I128 $0x13, Y12, Y4, Y2 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR (SI), Y6, Y6 + VPXOR 32(SI), Y10, Y10 + VPXOR 64(SI), Y8, Y8 + VPXOR 96(SI), Y2, Y2 + VMOVDQU Y6, (DI) + VMOVDQU Y10, 32(DI) + VMOVDQU Y8, 64(DI) + VMOVDQU Y2, 96(DI) + LEAQ 128(SI), SI + LEAQ 128(DI), DI + SUBQ $0x80, BX + JMP openAVX2TailLoop + +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + + // Compute the number of iterations that will hash two blocks of data + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x00000100, CX + SHRQ $0x04, CX + ADDQ $0x06, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + +openAVX2Tail384LoopB: + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + +openAVX2Tail384LoopA: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + ADDQ (BX), R10 + ADCQ 8(BX), 
R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + CMPQ R9, CX + JB openAVX2Tail384LoopB + CMPQ R9, $0x0a + JNE openAVX2Tail384LoopA + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX + +openAVX2Tail384Hash: + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail384HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail384Hash + +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + 
VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX + JMP openAVX2TailLoop + +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + MOVQ SI, R9 + +openAVX2Tail512LoopB: + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + +openAVX2Tail512LoopA: + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD 
$0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(R9), R10 + ADCQ 24(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(R9), R9 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + INCQ CX + CMPQ CX, $0x04 + JLT openAVX2Tail512LoopB + CMPQ CX, $0x0a + JNE openAVX2Tail512LoopA + MOVQ BX, CX + SUBQ $0x00000180, CX + ANDQ $-16, CX + +openAVX2Tail512HashLoop: + TESTQ CX, CX + JE openAVX2Tail512HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ 
AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + SUBQ $0x10, CX + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + LEAQ 384(SI), SI + LEAQ 384(DI), DI + SUBQ $0x00000180, BX + JMP openAVX2TailLoop + +DATA ·chacha20Constants<>+0(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+4(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+8(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+12(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+16(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+20(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+24(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+28(SB)/4, $0x6b206574 +GLOBL ·chacha20Constants<>(SB), RODATA|NOPTR, $32 + +DATA ·polyClampMask<>+0(SB)/8, $0x0ffffffc0fffffff +DATA ·polyClampMask<>+8(SB)/8, $0x0ffffffc0ffffffc +DATA ·polyClampMask<>+16(SB)/8, $0xffffffffffffffff +DATA ·polyClampMask<>+24(SB)/8, $0xffffffffffffffff +GLOBL ·polyClampMask<>(SB), RODATA|NOPTR, $32 + +DATA ·sseIncMask<>+0(SB)/8, $0x0000000000000001 +DATA ·sseIncMask<>+8(SB)/8, $0x0000000000000000 +GLOBL ·sseIncMask<>(SB), RODATA|NOPTR, $16 + +DATA ·andMask<>+0(SB)/8, $0x00000000000000ff +DATA ·andMask<>+8(SB)/8, $0x0000000000000000 +DATA ·andMask<>+16(SB)/8, $0x000000000000ffff +DATA ·andMask<>+24(SB)/8, $0x0000000000000000 +DATA ·andMask<>+32(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+40(SB)/8, $0x0000000000000000 +DATA ·andMask<>+48(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+56(SB)/8, $0x0000000000000000 +DATA ·andMask<>+64(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+72(SB)/8, $0x0000000000000000 +DATA ·andMask<>+80(SB)/8, $0x0000ffffffffffff 
+DATA ·andMask<>+88(SB)/8, $0x0000000000000000 +DATA ·andMask<>+96(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+104(SB)/8, $0x0000000000000000 +DATA ·andMask<>+112(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+120(SB)/8, $0x0000000000000000 +DATA ·andMask<>+128(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+136(SB)/8, $0x00000000000000ff +DATA ·andMask<>+144(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+152(SB)/8, $0x000000000000ffff +DATA ·andMask<>+160(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+168(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+176(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+184(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+192(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+200(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+208(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+216(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+224(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+232(SB)/8, $0x00ffffffffffffff +GLOBL ·andMask<>(SB), RODATA|NOPTR, $240 + +DATA ·avx2InitMask<>+0(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+16(SB)/8, $0x0000000000000001 +DATA ·avx2InitMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2InitMask<>(SB), RODATA|NOPTR, $32 + +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0d0c0f0e09080b0a +DATA ·rol16<>+16(SB)/8, $0x0504070601000302 +DATA ·rol16<>+24(SB)/8, $0x0d0c0f0e09080b0a +GLOBL ·rol16<>(SB), RODATA|NOPTR, $32 + +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0e0d0c0f0a09080b +DATA ·rol8<>+16(SB)/8, $0x0605040702010003 +DATA ·rol8<>+24(SB)/8, $0x0e0d0c0f0a09080b +GLOBL ·rol8<>(SB), RODATA|NOPTR, $32 + +DATA ·avx2IncMask<>+0(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2IncMask<>+16(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2IncMask<>(SB), RODATA|NOPTR, $32 + +// func chacha20Poly1305Seal(dst []byte, key []uint32, src []byte, ad []byte) +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Seal(SB), $288-96 + MOVQ SP, BP + ADDQ $0x20, BP + ANDQ $-32, BP + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + CMPB ·useAVX2+0(SB), $0x01 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ BX, $0x80 + JBE sealSSE128 + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + + // Store state on stack for future use + MOVO X3, 32(BP) + MOVO X6, 48(BP) + + // Load state, increment counter blocks + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 + + // Store counters + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + MOVQ $0x0000000a, R9 + +sealSSEIntroLoop: + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + 
PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + 
BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + + // Clamp and store the key + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) + + // Hash AAD + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 64(DI) + MOVOU X5, 80(DI) + MOVOU X8, 96(DI) + MOVOU X11, 112(DI) + MOVQ $0x00000080, CX + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 128(DI) + MOVOU X13, 144(DI) + MOVOU X14, 160(DI) + MOVOU X15, 176(DI) + ADDQ $0x40, CX + SUBQ $0x40, BX + LEAQ 64(SI), SI + MOVQ $0x00000002, CX + MOVQ $0x00000008, R9 + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + CMPQ BX, $0xc0 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 + + // Store counters + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + +sealSSEInnerLoop: + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, 
X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(DI), DI + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE 
$0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JGE sealSSEInnerLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) + + // Load - xor - store + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVO 64(BP), X15 + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + ADDQ $0xc0, SI + MOVQ $0x000000c0, CX + SUBQ $0xc0, BX + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + LEAQ 64(SI), SI + SUBQ $0x40, BX + MOVQ $0x00000006, CX + MOVQ $0x00000004, R9 + CMPQ BX, $0xc0 + JG sealSSEMainLoop + MOVQ BX, CX + TESTQ BX, BX + JE sealSSE128SealHash + MOVQ $0x00000006, CX + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + JMP sealSSETail192 + +sealSSETail64: + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + +sealSSETail64LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ 
AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealSSETail64LoopB: + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSETail64LoopA + DECQ R9 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X4 + PADDL 48(BP), X7 + PADDL 80(BP), X10 + JMP sealSSE128Seal + +sealSSETail128: + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + +sealSSETail128LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealSSETail128LoopB: + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, 
X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + DECQ CX + JG sealSSETail128LoopA + DECQ R9 + JGE sealSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVQ $0x00000040, CX + LEAQ 64(SI), SI + SUBQ $0x40, BX + JMP sealSSE128SealHash + +sealSSETail192: + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL 
·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 112(BP) + +sealSSETail192LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealSSETail192LoopB: + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, 
X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ CX + JG sealSSETail192LoopA + DECQ R9 + JGE sealSSETail192LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + PADDL 112(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + JMP sealSSE128SealHash + +sealSSE128: + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 + +sealSSE128InnerCipherLoop: + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + 
BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) + + // Hash + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + XORQ CX, CX + +sealSSE128SealHash: + CMPQ CX, $0x10 + JB sealSSE128Seal + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ BX, $0x10 + JB sealSSETail 
+ SUBQ $0x10, BX + + // Load for decryption + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + + // Extract for hashing + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Shift the stream "left" + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 + JMP sealSSE128Seal + +sealSSETail: + TESTQ BX, BX + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid read after end of buffer + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVQ BX, CX + LEAQ -1(SI)(BX*1), SI + XORQ R15, R15 + XORQ R8, R8 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $0x08, R15, R8 + SHLQ $0x08, R15 + MOVB (SI), AX + XORQ AX, R15 + LEAQ -1(SI), SI + DECQ CX + JNE sealSSETailLoadLoop + MOVQ R15, 64(BP) + MOVQ R8, 72(BP) + PXOR 64(BP), X1 + MOVOU X1, (DI) + MOVOU -16(R13)(R9*1), X12 + PAND X12, X1 + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ BX, DI + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + + // Final reduce + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 + + // Add in the "s" part of the key + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 + + // Finally store the tag at the end of the message + MOVQ R10, (DI) + MOVQ R11, 8(DI) + RET + +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + 
BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 + + // Special optimizations, for very short buffers + CMPQ BX, $0x000000c0 + JBE seal192AVX2 + CMPQ BX, $0x00000140 + JBE seal320AVX2 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA Y12, 64(BP) + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, 96(BP) + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y1, 128(BP) + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, R9 + +sealAVX2IntroLoop: + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + 
VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ R9 + JNE sealAVX2IntroLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPERM2I128 $0x02, Y0, Y14, Y4 + VPERM2I128 $0x13, Y0, Y14, Y0 + + // Clamp and store poly key + VPAND ·polyClampMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, (BP) + + // Hash AD + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y12, Y12 + VMOVDQU Y0, (DI) + VMOVDQU Y12, 32(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 64(SI), Y0, Y0 + VPXOR 96(SI), Y14, Y14 + VPXOR 128(SI), Y12, Y12 + VPXOR 160(SI), Y4, Y4 + VMOVDQU Y0, 64(DI) + VMOVDQU Y14, 96(DI) + VMOVDQU Y12, 128(DI) + VMOVDQU Y4, 160(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 192(SI), Y0, Y0 + VPXOR 224(SI), Y14, Y14 + VPXOR 256(SI), Y12, Y12 + VPXOR 288(SI), Y4, Y4 + VMOVDQU Y0, 192(DI) + VMOVDQU Y14, 224(DI) + VMOVDQU Y12, 256(DI) + VMOVDQU Y4, 288(DI) + MOVQ $0x00000140, CX + SUBQ $0x00000140, BX + LEAQ 320(SI), SI + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, Y15, Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, Y15, Y3, Y4 + CMPQ BX, $0x80 + JBE sealAVX2SealHash + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VPXOR 64(SI), Y12, Y12 + VPXOR 96(SI), Y4, Y4 + VMOVDQU Y0, 320(DI) + VMOVDQU Y14, 352(DI) + VMOVDQU Y12, 384(DI) + VMOVDQU Y4, 416(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVQ $0x00000008, CX + MOVQ $0x00000002, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + CMPQ BX, $0x00000200 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 
+ VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, 
Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + SUBQ $0x10, DI + MOVQ $0x00000009, CX + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, CX + +sealAVX2InternalLoop: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ 
$0x00, R12 + +sealAVX2InternalLoopStart: + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(DI), R10 + ADCQ 40(DI), R11 + ADCQ $0x01, R12 + LEAQ 48(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, 
Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JNE sealAVX2InternalLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + + // and here + ADDQ -16(DI), R10 + ADCQ -8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, 
Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + SUBQ $0x00000200, BX + CMPQ BX, $0x00000200 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + MOVQ $0x0000000a, CX + MOVQ $0x00000000, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +seal192AVX2: + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 + +sealAVX2192InnerCipherLoop: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 + JNE sealAVX2192InnerCipherLoop + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + + // Clamp and store poly key + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) + + // Stream for up to 192 bytes + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + XORQ CX, CX + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ CX, $0x10 + JB sealAVX2ShortSealLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ BX, $0x20 + JB sealAVX2ShortTail32 + SUBQ $0x20, BX + + // Load for encryption + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + + // Now can hash + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + + // Shift stream left + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ BX, $0x10 + VMOVDQA X0, X1 + JB sealAVX2ShortDone + SUBQ $0x10, BX + + // Load for encryption + VPXOR (SI), X0, X12 + VMOVDQU 
X12, (DI) + LEAQ 16(SI), SI + + // Hash + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +seal320AVX2: + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 + +sealAVX2320InnerCipherLoop: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR 
$0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 + JNE sealAVX2320InnerCipherLoop + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 + + // Clamp and store poly key + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) + + // Stream for up to 320 bytes + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 + JMP sealAVX2ShortSeal + +sealAVX2Tail128: + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA 32(BP), Y14 + VMOVDQA 64(BP), Y12 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, Y1 + +sealAVX2Tail128LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealAVX2Tail128LoopB: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ 16(DI), R10 + ADCQ 
24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ CX + JG sealAVX2Tail128LoopA + DECQ R9 + JGE sealAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y5 + VPADDD 32(BP), Y14, Y9 + VPADDD 64(BP), Y12, Y13 + VPADDD Y1, Y4, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2ShortSealLoop + +sealAVX2Tail256: + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + +sealAVX2Tail256LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealAVX2Tail256LoopB: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + 
VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ CX + JG sealAVX2Tail256LoopA + DECQ R9 + JGE sealAVX2Tail256LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2SealHash + +sealAVX2Tail384: + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + VMOVDQA Y2, Y15 + +sealAVX2Tail384LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ 
R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealAVX2Tail384LoopB: + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + 
IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ CX + JG sealAVX2Tail384LoopA + DECQ R9 + JGE sealAVX2Tail384LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPADDD Y15, Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + MOVQ $0x00000100, CX + LEAQ 256(SI), SI + SUBQ $0x00000100, BX + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + JMP sealAVX2SealHash + +sealAVX2Tail512: + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + +sealAVX2Tail512LoopA: + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + +sealAVX2Tail512LoopB: + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + 
VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + 
VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JG sealAVX2Tail512LoopA + DECQ R9 + JGE sealAVX2Tail512LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPXOR (SI), Y15, Y15 + VMOVDQU Y15, (DI) + VPERM2I128 $0x02, Y12, Y4, Y15 + VPXOR 32(SI), Y15, Y15 + VMOVDQU Y15, 32(DI) + VPERM2I128 $0x13, Y0, Y14, Y15 + VPXOR 64(SI), Y15, Y15 + VMOVDQU Y15, 64(DI) + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + MOVQ $0x00000180, CX + LEAQ 384(SI), SI + SUBQ $0x00000180, BX + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 000000000..6313898f0 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" +) + +func writeWithPadding(p *poly1305.MAC, b []byte) { + p.Write(b) + if rem := len(b) % 16; rem != 0 { + var buf [16]byte + padLen := 16 - rem + p.Write(buf[:padLen]) + } +} + +func writeUint64(p *poly1305.MAC, n int) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], uint64(n)) + p.Write(buf[:]) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + s.XORKeyStream(ciphertext, plaintext) + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(plaintext)) + p.Sum(tag[:0]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + tag := ciphertext[len(ciphertext)-16:] + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(ciphertext)) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !p.Verify(tag) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 000000000..34e6ab1df --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go new file mode 100644 index 000000000..1cebfe946 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
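sealGeneric and openGeneric above follow the RFC 8439 construction: the Poly1305 key is taken from the first ChaCha20 block, payload encryption starts at counter 1, and the tag covers the padded additional data, the padded ciphertext, and both lengths. A hedged sketch of how callers reach these paths through the package's public cipher.AEAD interface; the plaintext and associated data are made up:

```
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}

	// 12-byte nonce; it must never repeat under the same key. For randomly
	// generated nonces, prefer NewX (below in this diff).
	nonce := make([]byte, chacha20poly1305.NonceSize)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	plaintext := []byte("attack at dawn")
	aad := []byte("header-v1")

	// Seal appends ciphertext||tag to its first argument.
	ciphertext := aead.Seal(nil, nonce, plaintext, aad)

	// Open verifies the tag before returning any plaintext.
	recovered, err := aead.Open(nil, nonce, ciphertext, aad)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", recovered)
}
```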
+ +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" + + "golang.org/x/crypto/chacha20" +) + +type xchacha20poly1305 struct { + key [KeySize]byte +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. +func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return Overhead +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..2492f796a --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. 
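NewX above stretches the key with HChaCha20 over the first 16 nonce bytes and then reuses the 12-byte-nonce code path with the remaining 8 bytes, which is what makes random 24-byte nonces safe to use. A brief usage sketch of that recommendation; the message is illustrative:

```
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	aead, err := chacha20poly1305.NewX(key)
	if err != nil {
		panic(err)
	}

	// The 24-byte XChaCha20 nonce is large enough to draw at random
	// without a practical risk of collisions.
	nonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt))
}
```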
+func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. 
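AddASN1 above reserves a single length byte and relies on the builder's flush logic (later in this diff) to widen it if the body turns out longer; the Read* methods that follow are the parsing half. A small sketch of DER construction with these methods, assuming an invented SEQUENCE layout and values:

```
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// The zero value is a usable, growing Builder.
	var b cryptobyte.Builder

	// SEQUENCE { INTEGER 42, OCTET STRING "hi", BOOLEAN TRUE }
	b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
		seq.AddASN1Int64(42)
		seq.AddASN1OctetString([]byte("hi"))
		seq.AddASN1Boolean(true)
	})

	der, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(der))
	// Expected: 300a02012a040268690101ff
}
```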
+func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. 
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := bytes[0] + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 000000000..90ef6a241 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..cf254f5f1 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
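readASN1 above enforces DER's minimal-length encoding rules, and the asn1 subpackage's Tag constants supply the identifiers the ReadASN1* helpers match against. A hedged sketch parsing a SEQUENCE with the String side of the package; the structure mirrors the earlier building example and is equally invented:

```
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// SEQUENCE { INTEGER 42, OCTET STRING "hi", BOOLEAN TRUE }, as DER.
	der, err := hex.DecodeString("300a02012a040268690101ff")
	if err != nil {
		panic(err)
	}

	input := cryptobyte.String(der)
	var seq cryptobyte.String
	if !input.ReadASN1(&seq, asn1.SEQUENCE) || !input.Empty() {
		panic("not a single DER SEQUENCE")
	}

	var n int64
	var payload cryptobyte.String
	var flag bool
	if !seq.ReadASN1Integer(&n) ||
		!seq.ReadASN1(&payload, asn1.OCTET_STRING) ||
		!seq.ReadASN1Boolean(&flag) ||
		!seq.Empty() {
		panic("malformed SEQUENCE")
	}
	fmt.Println(n, string(payload), flag) // 42 hi true
}
```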
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. 
Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. 
+ if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 000000000..4b0f8097f --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
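Outside ASN.1, the same continuation machinery drives the fixed-width length prefixes: addLengthPrefixed reserves the prefix, the continuation writes the body into a child builder, and flushChild back-fills the length. A short sketch with an invented uint16-prefixed message:

```
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint8(1) // message type; made-up value
	b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
		body.AddBytes([]byte("hello"))
	})

	msg, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", msg) // 01 00 05 68 65 6c 6c 6f
}
```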
+ +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. 
+func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 000000000..21ca3b2ee --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,90 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of the X25519 function, which +// performs scalar multiplication on the elliptic curve known as Curve25519. +// See RFC 7748. +// +// This package is a wrapper for the X25519 implementation +// in the crypto/ecdh package. +package curve25519 + +import "crypto/ecdh" + +// ScalarMult sets dst to the product scalar * point. +// +// Deprecated: when provided a low-order point, ScalarMult will set dst to all +// zeroes, irrespective of the scalar. Instead, use the X25519 function, which +// will return an error. 
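The String readers are the mirror image of the builder: each consumes its prefix or fixed width, bounds the body, and advances the slice. A sketch that parses the uint16-prefixed message built in the previous example:

```
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	msg := cryptobyte.String([]byte{0x01, 0x00, 0x05, 'h', 'e', 'l', 'l', 'o'})

	var typ uint8
	var body cryptobyte.String
	if !msg.ReadUint8(&typ) ||
		!msg.ReadUint16LengthPrefixed(&body) ||
		!msg.Empty() {
		panic("malformed message")
	}
	fmt.Println(typ, string(body)) // 1 hello
}
```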
+func ScalarMult(dst, scalar, point *[32]byte) { + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } +} + +// ScalarBaseMult sets dst to the product scalar * base where base is the +// standard generator. +// +// It is recommended to use the X25519 function with Basepoint instead, as +// copying into fixed size arrays can lead to unexpected bugs. +func ScalarBaseMult(dst, scalar *[32]byte) { + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) +} + +const ( + // ScalarSize is the size of the scalar input to X25519. + ScalarSize = 32 + // PointSize is the size of the point input to X25519. + PointSize = 32 +) + +// Basepoint is the canonical Curve25519 generator. +var Basepoint []byte + +var basePoint = [32]byte{9} + +func init() { Basepoint = basePoint[:] } + +// X25519 returns the result of the scalar multiplication (scalar * point), +// according to RFC 7748, Section 5. scalar, point and the return value are +// slices of 32 bytes. +// +// scalar can be generated at random, for example with crypto/rand. point should +// be either Basepoint or the output of another X25519 call. +// +// If point is Basepoint (but not if it's a different slice with the same +// contents) a precomputed implementation might be used for performance. +func X25519(scalar, point []byte) ([]byte, error) { + // Outline the body of function, to let the allocation be inlined in the + // caller, and possibly avoid escaping to the heap. + var dst [32]byte + return x25519(&dst, scalar, point) +} + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 000000000..3bee66294 --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
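X25519 above routes through crypto/ecdh, so malformed or low-order points surface as errors rather than all-zero outputs, which is the reason ScalarMult is deprecated in its favor. A hedged sketch of the usual ephemeral Diffie-Hellman exchange with it:

```
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each party draws a random 32-byte scalar and publishes its
	// product with the base point.
	alicePriv := make([]byte, curve25519.ScalarSize)
	bobPriv := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(alicePriv); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv); err != nil {
		panic(err)
	}

	alicePub, err := curve25519.X25519(alicePriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bobPub, err := curve25519.X25519(bobPriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Both sides arrive at the same shared secret.
	s1, err := curve25519.X25519(alicePriv, bobPub)
	if err != nil {
		panic(err)
	}
	s2, err := curve25519.X25519(bobPriv, alicePub)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(s1, s2)) // true
}
```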
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + if f.counter > 1 { + f.expander.Reset() + } + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go new file mode 100644 index 000000000..551ff0c35 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego + +// Package alias implements memory aliasing tests. +package alias + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
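New above simply chains Extract and Expand, and the Reader refuses to produce more than 255 hash-lengths of output, which is the RFC 5869 limit behind the "entropy limit reached" error. A sketch deriving two independent keys from one secret; the info label is made up:

```
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := make([]byte, 32) // input keying material, e.g. a DH shared secret
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}
	salt := make([]byte, sha256.Size) // optional; nil is also allowed
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Derive a 32-byte encryption key and a 32-byte MAC key from the same IKM.
	kdf := hkdf.New(sha256.New, secret, salt, []byte("example: enc+mac v1"))

	encKey := make([]byte, 32)
	macKey := make([]byte, 32)
	if _, err := io.ReadFull(kdf, encKey); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(kdf, macKey); err != nil {
		panic(err)
	}
	fmt.Printf("enc: %x\nmac: %x\n", encKey, macKey)
}
```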
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go new file mode 100644 index 000000000..6fe61b5c6 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego + +// Package alias implements memory aliasing tests. +package alias + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go new file mode 100644 index 000000000..333da285b --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego + +package poly1305 + +type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go new file mode 100644 index 000000000..4aaea810a --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go @@ -0,0 +1,99 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from an nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. 
+package poly1305 + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + m := &MAC{} + initialize(key, &m.macState) + return m +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum or Verify") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go new file mode 100644 index 000000000..164cd47d3 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go @@ -0,0 +1,47 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s new file mode 100644 index 000000000..133757384 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -0,0 +1,93 @@ +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. + +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 + JB bytes_between_0_and_15 + +loop: + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI + +multiply: + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $0x00000001, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $0x08, BX, CX + SHLQ $0x08, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 + JMP multiply + +done: + MOVQ R8, (DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go new file mode 100644 index 000000000..ec2202bd7 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -0,0 +1,312 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. + +package poly1305 + +import ( + "encoding/binary" + "math/bits" +) + +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64 bytes message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. 
It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. + +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m +} + +// macState holds numbers in saturated 64-bit little-endian limbs. That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 +} + +type macGeneric struct { + macState + + buffer [TagSize]byte + offset int +} + +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + updateGeneric(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState + if h.offset > 0 { + updateGeneric(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +func add128(a, b uint128) uint128 { + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") + } + return uint128{lo, hi} +} + +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a +} + +// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. 
+func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. + // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. 
+ h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go new file mode 100644 index 000000000..4aec4874b --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go @@ -0,0 +1,47 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package poly1305 + +//go:noescape +func update(state *macState, msg []byte) + +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
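
Backing up to `finalize` in sum_generic.go just above: it avoids branching on secret data by feeding the borrow from `bits.Sub64` into `select64`. A minimal sketch of the same subtract-and-select pattern on a single 64-bit word; `reduceOnce` and the small demo modulus are illustrative only and not part of the package.

```
package main

import (
	"fmt"
	"math/bits"
)

// select64 mirrors the helper above: it returns x when v == 1 and y when
// v == 0, with no data-dependent branch.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

// reduceOnce returns h mod p for h < 2p using the same pattern as finalize,
// here on one 64-bit word instead of three limbs.
func reduceOnce(h, p uint64) uint64 {
	d, borrow := bits.Sub64(h, p, 0) // borrow == 1 iff h < p
	return select64(borrow, h, d)    // keep h if it was already reduced
}

func main() {
	const p = 65521 // any modulus; finalize uses 2¹³⁰ - 5 spread over three limbs
	fmt.Println(reduceOnce(70000, p), reduceOnce(1234, p)) // 4479 1234
}
```

`finalize` does the same thing across the three limbs of h, chaining the borrow through `bits.Sub64` and then adding s to the low 128 bits.
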
+type mac struct{ macGeneric } + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + update(&h.macState, h.buffer[:]) + } + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return nn, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.macState + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s new file mode 100644 index 000000000..b3c1699bf --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -0,0 +1,179 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADDE t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + ADDZE t3; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ + ADDC t0, h0, h0; \ + ADDE t3, t1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + + PCALIGN $16 +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP R5, $0 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP R17, $0 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. 
+ BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP R5, $0 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go new file mode 100644 index 000000000..e1d033a49 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// updateVX is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +// +//go:noescape +func updateVX(state *macState, msg []byte) + +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. +type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) + } + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) + } else { + updateGeneric(&h.macState, p[:body]) + } + } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s new file mode 100644 index 000000000..0fe3a7c21 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -0,0 +1,503 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego + +#include "textflag.h" + +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accommodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
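
The even/odd lane refactoring described above is pure algebra and can be checked independently of the vector code. A short big.Int sketch for a four-block (64-byte) message; the names and the random stand-ins for the clamped key and the blocks are illustrative only.

```
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5
	mul := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(a, b), p) }
	add := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(a, b), p) }

	r, _ := rand.Int(rand.Reader, p) // stand-in for the clamped key
	r2 := mul(r, r)
	m := make([]*big.Int, 4) // stand-ins for the four 16-byte blocks
	for i := range m {
		m[i], _ = rand.Int(rand.Reader, p)
	}

	// Sequential evaluation: h = m0·r⁴ + m1·r³ + m2·r² + m3·r, in Horner form.
	seq := new(big.Int)
	for _, mi := range m {
		seq = mul(add(seq, mi), r)
	}

	// Two-lane evaluation: both lanes start by multiplying by r²; the final
	// step multiplies lane 0 by r² and lane 1 by r.
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)
	fmt.Println("lanes match sequential:", add(lane0, lane1).Cmp(seq) == 0) // true
}
```

This is the switch the b2 path below performs once two or fewer blocks remain: lane 0 keeps the r² coefficients while lane 1 is swapped over to r.
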
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 +// EX0 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ + VAG T_0, h0, h0 \ + VAG T_3, h3, h3 \ + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. 
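
The same four-stage carry schedule is easier to read in scalar form. The sketch below runs it over one lane of five 26-bit limbs; the name `reduce26` and the mask constant are assumptions for this illustration and are not in the vendored code.

```
package main

import "fmt"

const mask26 = (1 << 26) - 1

// reduce26 applies the same four carry stages as the REDUCE macro to a
// single scalar lane of five 26-bit limbs (limb 4 is the 2¹⁰⁴ limb).
func reduce26(h *[5]uint64) {
	carry := func(from, to int) {
		c := h[from] >> 26
		h[from] &= mask26
		h[to] += c
	}
	// Stage 1: limb 0 -> 1, limb 3 -> 4.
	carry(0, 1)
	carry(3, 4)
	// Stage 2: limb 1 -> 2, limb 4 -> 0. A carry out of limb 4 is a multiple
	// of 2¹³⁰, which is congruent to 5 mod 2¹³⁰ - 5, so it re-enters at
	// limb 0 multiplied by 5.
	carry(1, 2)
	c := h[4] >> 26
	h[4] &= mask26
	h[0] += 5 * c
	// Stage 3: limb 0 -> 1, limb 2 -> 3.
	carry(0, 1)
	carry(2, 3)
	// Stage 4: limb 3 -> 4; only limbs 1 and 4 may still be 27 bits wide.
	carry(3, 4)
}

func main() {
	h := [5]uint64{1 << 40, 1 << 40, 1 << 40, 1 << 40, 1 << 40} // deliberately unreduced
	reduce26(&h)
	fmt.Println(h)
}
```

Stage 2 is where the prime shows up: the wraparound from limb 4 back to limb 0 is exactly the VESLG $2 plus VAG pair (a multiply by 5) in the macro below.
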
+#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ + VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VPERM in0, in1, EX2, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block + CMPBLE R3, $16, skip + + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] + +loop: + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 + +multiply: + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products + REDUCE(H_0, H_1, H_2, H_3, H_4) + + CMPBNE R3, $0, loop + +finish: + // sum lane 0 and lane 1 and put the result in lane 1 + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 + VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 + + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG $26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) + RET + +b2: // 2 or fewer blocks remaining + CMPBLE R3, $16, b1 + + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. 
If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] + + MOVD $0, R3 + BR multiply + +skip: + CMPBEQ R3, $0, finish + +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. 
+ VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. + VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index 1d7a0ea05..000000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index d62f787e9..000000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. 
Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. -func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. 
-func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. 
- case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment 
and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. -func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. 
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc96..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. 
- DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 6d7639722..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. 
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go 
deleted file mode 100644 index 171350339..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. -type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 0a19794a8..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - "math/bits" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte - sentFirst bool - buf []byte -} - -// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. -const minFirstPartialWrite = 512 - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - off := 0 - if !w.sentFirst { - if len(w.buf) > 0 || len(p) < minFirstPartialWrite { - off = len(w.buf) - w.buf = append(w.buf, p...) - if len(w.buf) < minFirstPartialWrite { - return len(p), nil - } - p = w.buf - w.buf = nil - } - w.sentFirst = true - } - - power := uint8(30) - for len(p) > 0 { - l := 1 << power - if len(p) < l { - power = uint8(bits.Len32(uint32(len(p)))) - 1 - l = 1 << power - } - w.lengthByte[0] = 224 + power - _, err = w.w.Write(w.lengthByte[:]) - if err == nil { - var m int - m, err = w.w.Write(p[:l]) - n += m - } - if err != nil { - if n < off { - return 0, err - } - return n - off, err - } - p = p[l:] - } - return n - off, nil -} - -func (w *partialLengthWriter) Close() error { - if len(w.buf) > 0 { - // In this case we can't send a 512 byte packet. - // Just send what we have. - p := w.buf - w.sentFirst = true - w.buf = nil - if _, err := w.Write(p); err != nil { - return err - } - } - - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. 
-type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 192aac376..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decryptor RSA only). - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = io.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = 
&ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f5251..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6cf..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c613..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a532..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
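The subpacketLengthLength and serializeSubpacketLength helpers above implement the variable-length subpacket lengths of RFC 4880, section 5.2.3.1: one octet below 192, two octets up to 16319, and a five-octet form above that. A self-contained round-trip sketch of the same encoding; the helper names here are illustrative, not part of the package:

```
package main

import "fmt"

// encodeSubpacketLength writes an RFC 4880 section 5.2.3.1 subpacket length
// into buf and returns the number of octets used (1, 2 or 5), mirroring the
// removed serializeSubpacketLength helper.
func encodeSubpacketLength(buf []byte, length int) int {
	switch {
	case length < 192:
		buf[0] = byte(length)
		return 1
	case length < 16320:
		length -= 192
		buf[0] = byte(length>>8) + 192
		buf[1] = byte(length)
		return 2
	default:
		buf[0] = 255
		buf[1] = byte(length >> 24)
		buf[2] = byte(length >> 16)
		buf[3] = byte(length >> 8)
		buf[4] = byte(length)
		return 5
	}
}

// decodeSubpacketLength is the inverse, as performed at the top of
// parseSignatureSubpacket.
func decodeSubpacketLength(buf []byte) (length, n int) {
	switch {
	case buf[0] < 192:
		return int(buf[0]), 1
	case buf[0] < 255:
		return int(buf[0]-192)<<8 + int(buf[1]) + 192, 2
	default:
		return int(buf[1])<<24 | int(buf[2])<<16 | int(buf[3])<<8 | int(buf[4]), 5
	}
}

func main() {
	var buf [5]byte
	for _, l := range []int{10, 200, 20000} {
		n := encodeSubpacketLength(buf[:], l)
		back, m := decodeSubpacketLength(buf[:])
		fmt.Printf("length %5d -> %d octet(s) -> %d (read %d)\n", l, n, back, m)
	}
}
```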
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
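The unwrapECDSASig helper above exists because a generic crypto.Signer hands back an ECDSA signature as an ASN.1 SEQUENCE of r and s, while OpenPGP stores them as two separate MPIs. A small round-trip sketch of that wrapping and unwrapping; the struct name and sample values are illustrative:

```
package main

import (
	"encoding/asn1"
	"fmt"
	"math/big"
)

// ecdsaSig mirrors the ASN.1 layout produced by crypto.Signer for ECDSA keys.
type ecdsaSig struct {
	R, S *big.Int
}

func main() {
	// Wrap sample r and s values the way a crypto.Signer would...
	der, err := asn1.Marshal(ecdsaSig{R: big.NewInt(12345), S: big.NewInt(67890)})
	if err != nil {
		panic(err)
	}
	// ...then unwrap them the way the removed Sign path does before storing
	// them as the ECDSASigR and ECDSASigS MPIs.
	var sig ecdsaSig
	if _, err := asn1.Unmarshal(der, &sig); err != nil {
		panic(err)
	}
	fmt.Println(sig.R, sig.S) // 12345 67890
}
```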
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff8893..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c4..000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 48a893146..000000000 --- a/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. 
The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. 
- if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. -func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. 
The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. -type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. 
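For anyone still calling the deprecated golang.org/x/crypto/openpgp API that this hunk drops from vendor: CheckDetachedSignature and CheckArmoredDetachedSignature (both defined in this file) verify a file against a detached signature and return the signing Entity. A minimal usage sketch; the file names are placeholders:

```
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func mustOpen(name string) *os.File {
	f, err := os.Open(name)
	if err != nil {
		panic(err)
	}
	return f
}

func main() {
	// Placeholder file names: an armored public key ring, the signed file,
	// and its armored detached signature.
	pub := mustOpen("signer-pubkey.asc")
	defer pub.Close()
	keyring, err := openpgp.ReadArmoredKeyRing(pub)
	if err != nil {
		panic(err)
	}

	signed := mustOpen("artifact.tar.gz")
	defer signed.Close()
	sig := mustOpen("artifact.tar.gz.asc")
	defer sig.Close()

	// ErrUnknownIssuer comes back when no key in the ring matches the
	// signature's issuer key ID.
	signer, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
	if err != nil {
		panic(err)
	}
	fmt.Printf("good signature from key %X\n", signer.PrimaryKey.KeyId)
}
```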
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index f53244a1c..000000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformatioms. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. 
If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. 
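The coded-count arithmetic above is easy to misread: the low four bits of the octet select a mantissa in 16..31 and the high four bits an exponent, so decoded counts run from 1024 up to 65011712. A tiny standalone check of the values called out in the comments (96, the package default, and 255, the maximum):

```
package main

import "fmt"

// decodeCount mirrors the removed s2k.decodeCount: the low four bits of the
// coded octet pick a mantissa in 16..31 and the high four bits an exponent.
func decodeCount(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}

func main() {
	fmt.Println(decodeCount(0))   // 1024, the smallest representable count
	fmt.Println(decodeCount(96))  // 65536, the package default
	fmt.Println(decodeCount(255)) // 65011712, the maximum
}
```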
-func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index b89d48b81..000000000 --- a/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. 
- FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. 
(Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. 
- configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 000000000..28cd99c7f --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_compat.go b/vendor/golang.org/x/crypto/poly1305/poly1305_compat.go new file mode 100644 index 000000000..cb9207f30 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305_compat.go @@ -0,0 +1,91 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was +// used with a fixed key in order to generate one-time keys from an nonce. 
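The pbkdf2 package added above exposes a single Key function. A minimal sketch of deriving a 32-byte key with the HMAC-SHA-256 variant, assuming an illustrative salt size and iteration count rather than any recommendation from the package itself:

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// A random per-password salt; the package doc recommends at least 8 bytes.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Derive a 32-byte key using HMAC-SHA-256 as the PRF by passing sha256.New.
	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", dk)
}
```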
+// However, in this package AES isn't used and the one-time key is specified +// directly. +// +// Deprecated: Poly1305 as implemented by this package is a cryptographic +// building block that is not safe for general purpose use. +// For encryption, use the full ChaCha20-Poly1305 construction implemented by +// golang.org/x/crypto/chacha20poly1305. For authentication, use a general +// purpose MAC such as HMAC implemented by crypto/hmac. +package poly1305 + +import "golang.org/x/crypto/internal/poly1305" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +// +// For use with golang.org/x/crypto/chacha20poly1305, chacha20poly1305.Overhead +// can be used instead. +const TagSize = 16 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + poly1305.Sum(out, m, key) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + return poly1305.Verify(mac, m, key) +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + return &MAC{mac: poly1305.New(key)} +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum or Verify causes it to panic. +type MAC struct { + mac *poly1305.MAC +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum or Verify. +func (h *MAC) Write(p []byte) (n int, err error) { + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + return h.mac.Sum(b) +} + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + return h.mac.Verify(expected) +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 000000000..76fa40fb2 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). 
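The poly1305 compatibility shim above forwards to golang.org/x/crypto/internal/poly1305 and carries the package's own deprecation notice steering new code toward chacha20poly1305. A minimal sketch of the one-shot and streaming forms, assuming an all-zero key purely for illustration (a real key must be secret and used for a single message only):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// A one-time key: it must never be reused for a second message.
	var key [32]byte
	msg := []byte("hello, world")

	// One-shot authenticator.
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)

	// Streaming form via the MAC type.
	mac := poly1305.New(&key)
	mac.Write(msg)

	fmt.Println(poly1305.Verify(&tag, msg, &key), mac.Verify(tag[:]))
}
```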
+package scrypt + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. +func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = 
binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning +// a byte slice of length keyLen that can be used as cryptographic key. +// +// N is a CPU/memory cost parameter, which must be a power of two greater than 1. +// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the +// limits, the function returns a nil byte slice and an error. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) +// +// The recommended parameters for interactive logins as of 2017 are N=32768, r=8 +// and p=1. The parameters N, r, and p should be increased as memory latency and +// CPU parallelism increases; consider setting N to the highest power of 2 you +// can derive within 100 milliseconds. Remember to get a good random salt. +func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) { + if N <= 1 || N&(N-1) != 0 { + return nil, errors.New("scrypt: N must be > 1 and a power of 2") + } + if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r { + return nil, errors.New("scrypt: parameters are too large") + } + + xy := make([]uint32, 64*r) + v := make([]uint32, 32*N*r) + b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New) + + for i := 0; i < p; i++ { + smix(b[i*128*r:], r, N, v, xy) + } + + return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil +} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go new file mode 100644 index 000000000..7e0230907 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -0,0 +1,62 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sha3 implements the SHA-3 fixed-output-length hash functions and +// the SHAKE variable-output-length hash functions defined by FIPS-202. +// +// Both types of hash function use the "sponge" construction and the Keccak +// permutation. For a detailed specification see http://keccak.noekeon.org/ +// +// # Guidance +// +// If you aren't sure what function you need, use SHAKE256 with at least 64 +// bytes of output. The SHAKE instances are faster than the SHA3 instances; +// the latter have to allocate memory to conform to the hash.Hash interface. +// +// If you need a secret-key MAC (message authentication code), prepend the +// secret key to the input, hash with SHAKE256 and read at least 32 bytes of +// output. +// +// # Security strengths +// +// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security +// strength against preimage attacks of x bits. Since they only produce "x" +// bits of output, their collision-resistance is only "x/2" bits. +// +// The SHAKE-256 and -128 functions have a generic security strength of 256 and +// 128 bits against all attacks, provided that at least 2x bits of their output +// is used. 
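scrypt.Key, defined above, validates its cost parameters and then layers smix on top of PBKDF2. A minimal sketch using the interactive-login parameters the package documentation cites (N=32768, r=8, p=1); the salt handling is illustrative:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/scrypt"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// 2017-era interactive-login parameters from the package documentation.
	dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", dk)
}
```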
Requesting more than 64 or 32 bytes of output, respectively, does +// not increase the collision-resistance of the SHAKE functions. +// +// # The sponge construction +// +// A sponge builds a pseudo-random function from a public pseudo-random +// permutation, by applying the permutation to a state of "rate + capacity" +// bytes, but hiding "capacity" of the bytes. +// +// A sponge starts out with a zero state. To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go new file mode 100644 index 000000000..c544b29e5 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +import ( + "crypto" + "hash" +) + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() hash.Hash { + return new224() +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() hash.Hash { + return new256() +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() hash.Hash { + return new384() +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. 
+func New512() hash.Hash { + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +func new224Generic() *state { + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +func new256Generic() *state { + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +func new384Generic() *state { + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +func new512Generic() *state { + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } + +// NewLegacyKeccak512 creates a new Keccak-512 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New512 instead. +func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. +func Sum384(data []byte) (digest [48]byte) { + h := New384() + h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 000000000..9d85fb621 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go new file mode 100644 index 000000000..ce48b1dd3 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -0,0 +1,414 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package sha3 + +import "math/bits" + +// rc stores the round constants for use in the ι step. 
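The constructors and Sum helpers above give both a one-shot and a streaming interface. A minimal sketch of the two, plus the legacy Keccak padding variant, with an illustrative input:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello, world")

	// One-shot SHA3-256 digest.
	sum := sha3.Sum256(data)
	fmt.Printf("%x\n", sum)

	// Streaming form via hash.Hash, equivalent to the call above.
	h := sha3.New256()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))

	// Legacy Keccak-256 padding, only for compatibility with existing systems.
	k := sha3.NewLegacyKeccak256()
	k.Write(data)
	fmt.Printf("%x\n", k.Sum(nil))
}
```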
+var rc = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} + +// keccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +func keccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[12] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[18] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[24] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[16] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[22] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[3] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[1] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[7] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[19] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[11] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[23] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[4] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[2] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[8] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[14] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ 
a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[7] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[23] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[14] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[11] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[2] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[18] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[6] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[22] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[4] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[1] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[8] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[24] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[12] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[3] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[19] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[22] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[8] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[19] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[1] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[12] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[23] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[16] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[2] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[24] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[5] = bc0 ^ (bc2 &^ bc1) 
+ a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[6] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[3] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[14] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[7] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[18] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[4] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = bits.RotateLeft64(t, 44) + t = a[2] ^ d2 + bc2 = bits.RotateLeft64(t, 43) + t = a[3] ^ d3 + bc3 = bits.RotateLeft64(t, 21) + t = a[4] ^ d4 + bc4 = bits.RotateLeft64(t, 14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = bits.RotateLeft64(t, 3) + t = a[6] ^ d1 + bc3 = bits.RotateLeft64(t, 45) + t = a[7] ^ d2 + bc4 = bits.RotateLeft64(t, 61) + t = a[8] ^ d3 + bc0 = bits.RotateLeft64(t, 28) + t = a[9] ^ d4 + bc1 = bits.RotateLeft64(t, 20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = bits.RotateLeft64(t, 18) + t = a[11] ^ d1 + bc0 = bits.RotateLeft64(t, 1) + t = a[12] ^ d2 + bc1 = bits.RotateLeft64(t, 6) + t = a[13] ^ d3 + bc2 = bits.RotateLeft64(t, 25) + t = a[14] ^ d4 + bc3 = bits.RotateLeft64(t, 8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = bits.RotateLeft64(t, 36) + t = a[16] ^ d1 + bc2 = bits.RotateLeft64(t, 10) + t = a[17] ^ d2 + bc3 = bits.RotateLeft64(t, 15) + t = a[18] ^ d3 + bc4 = bits.RotateLeft64(t, 56) + t = a[19] ^ d4 + bc0 = bits.RotateLeft64(t, 27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = bits.RotateLeft64(t, 41) + t = a[21] ^ d1 + bc4 = bits.RotateLeft64(t, 2) + t = a[22] ^ d2 + bc0 = bits.RotateLeft64(t, 62) + t = a[23] ^ d3 + bc1 = bits.RotateLeft64(t, 55) + t = a[24] ^ d4 + bc2 = bits.RotateLeft64(t, 39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go new file mode 100644 index 000000000..b908696be --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s new file mode 100644 index 000000000..99e2f16e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -0,0 +1,5419 @@ +// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. + +//go:build amd64 && !purego && gc + +// func keccakF1600(a *[25]uint64) +TEXT ·keccakF1600(SB), $200-8 + MOVQ a+0(FP), DI + + // Convert the user state into an internal state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) + + // Execute the KeccakF permutation + MOVQ (DI), SI + MOVQ 8(DI), BP + MOVQ 32(DI), R15 + XORQ 40(DI), SI + XORQ 48(DI), BP + XORQ 72(DI), R15 + XORQ 80(DI), SI + XORQ 88(DI), BP + XORQ 112(DI), R15 + XORQ 120(DI), SI + XORQ 128(DI), BP + XORQ 152(DI), R15 + XORQ 160(DI), SI + XORQ 168(DI), BP + MOVQ 176(DI), DX + MOVQ 184(DI), R8 + XORQ 192(DI), R15 + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // 
Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000008082, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 
112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000808a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, 
R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008000, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX 
+ MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, 
AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX 
+ MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX 
+ ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, 
R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000008a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, 
BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000088, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, 
AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080008009, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, 
AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 
+ ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000008000808b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 
80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000000000008b, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, 
R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008089, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, 
AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008003, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, 
R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008002, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, 
R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000000080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), 
R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x000000000000800a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + 
MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x800000008000000a, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 
56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // 
Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, 
R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, 
R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008008, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + NOP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + NOP + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + NOP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + NOP 
+ ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + NOP + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + NOP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + NOP + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + NOP + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + NOP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + NOP + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + NOP + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + NOP + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + NOP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Revert the internal state to the user state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) + RET diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go new file mode 100644 index 000000000..afedde5ab --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +type state struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. 
Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + i, n int // storage[i:n] is the buffer, i is only used while squeezing + storage [maxRate]byte + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *state) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *state) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the buffer indexes, and setting Sponge.state to absorbing. +func (d *state) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.i, d.n = 0, 0 +} + +func (d *state) clone() *state { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. +func (d *state) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.storage[:d.rate]) + d.n = 0 + keccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + keccakF1600(&d.a) + d.i = 0 + copyOut(d, d.storage[:d.rate]) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *state) padAndPermute() { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + d.storage[d.n] = d.dsbyte + d.n++ + for d.n < d.rate { + d.storage[d.n] = 0 + d.n++ + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + d.storage[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.n = d.rate + copyOut(d, d.storage[:d.rate]) +} + +// Write absorbs more data into the hash's state. It panics if any +// output has already been read. +func (d *state) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + written = len(p) + + for len(p) > 0 { + if d.n == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + keccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - d.n + if todo > len(p) { + todo = len(p) + } + d.n += copy(d.storage[d.n:], p[:todo]) + p = p[todo:] + + // If the sponge is full, apply the permutation. 
+ if d.n == d.rate { + d.permute() + } + } + } + + return +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *state) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute() + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + n := copy(out, d.storage[d.i:d.n]) + d.i += n + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if d.i == d.rate { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. It panics if any output has already been read. +func (d *state) Sum(in []byte) []byte { + if d.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation + dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 000000000..00d8034ae --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,303 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" + + "golang.org/x/sys/cpu" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. +// +//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. 
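The sha3.go comments above explain that the domain-separation byte (0x06 for SHA-3, 0x1f for SHAKE) already carries the first padding bit, and that the final "1" bit of the pad10*1 rule is XORed into the last byte of the rate-sized block. A minimal standalone sketch of that final-block padding step, assuming the input left `used < rate` bytes in the buffer (as the vendored code guarantees before padAndPermute runs); this is an illustration, not part of the vendored package:

```go
package main

import "fmt"

// padFinalBlock mirrors the padding step in padAndPermute: write the
// domain-separation byte right after the absorbed input, zero-fill the rest
// of the rate-sized block, then set the MSB of the last byte (the final "1"
// of the pad10*1 rule). When used == rate-1 both writes land in the same
// byte, which is why the last step is an XOR rather than an assignment.
func padFinalBlock(used, rate int, dsbyte byte) []byte {
	block := make([]byte, rate)
	block[used] = dsbyte
	block[rate-1] ^= 0x80
	return block
}

func main() {
	// SHA3-256 parameters: rate 136 bytes, dsbyte 0x06 (empty message here).
	b := padFinalBlock(0, 136, 0x06)
	fmt.Printf("first byte %#02x, last byte %#02x\n", b[0], b[135])
}
```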
+// +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length for full security + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + s.outputLen = 32 + case shake_256: + s.rate = 136 + s.outputLen = 64 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: Write after Read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(b) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (s *asmState) Read(out []byte) (n int, err error) { + // The 'compute last message digest' instruction only stores the digest + // at the first operand (dst) for SHAKE functions. 
+ if s.function != shake_128 && s.function != shake_256 { + panic("sha3: can only call Read for SHAKE functions") + } + + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.state != spongeAbsorbing { + panic("sha3: Sum after Read") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + switch s.function { + case sha3_224, sha3_256, sha3_384, sha3_512: + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) + case shake_128, shake_256: + d := make([]byte, s.outputLen, 64) + klmd(s.function, &a, d, s.buf) + return append(b, d[:s.outputLen]...) + default: + panic("sha3: unknown function") + } +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_224) + } + return new224Generic() +} + +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_256) + } + return new256Generic() +} + +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. +func new384() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_384) + } + return new384Generic() +} + +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { + if cpu.S390X.HasSHA3 { + return newAsmState(sha3_512) + } + return new512Generic() +} + +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. 
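The constructors above fall back to the generic Go implementation unless the KIMD/KLMD instructions are present. A small sketch of the same CPU-feature gate, assuming golang.org/x/sys/cpu (which this patch also vendors) is importable; the messages are illustrative only:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// cpu.S390X is declared on every platform; its fields are only
	// populated when running on s390x hardware.
	if cpu.S390X.HasSHA3 {
		fmt.Println("KIMD/KLMD SHA-3 instructions available: assembly path used")
	} else {
		fmt.Println("falling back to the generic Go implementation")
	}
}
```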
+func newShake128() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_128) + } + return newShake128Generic() +} + +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { + if cpu.S390X.HasSHA3 { + return newAsmState(shake_256) + } + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 000000000..826b862c7 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go new file mode 100644 index 000000000..1ea9275b8 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "encoding/binary" + "hash" + "io" +) + +// ShakeHash defines the interface to hash functions that support +// arbitrary-length output. When used as a plain [hash.Hash], it +// produces minimum-length outputs that provide full-strength generic +// security. +type ShakeHash interface { + hash.Hash + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error, but subsequent calls to Write or Sum + // will panic. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash +} + +// cSHAKE specific context +type cshakeState struct { + *state // SHA-3 state context and Read/Write operations + + // initBlock is the cSHAKE specific initialization set of bytes. It is initialized + // by newCShake function and stores concatenation of N followed by S, encoded + // by the method specified in 3.3 of [1]. + // It is stored here in order for Reset() to be able to put context into + // initial state. 
+ initBlock []byte +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + dsbyteCShake = 0x04 + rate128 = 168 + rate256 = 136 +) + +func bytepad(input []byte, w int) []byte { + // leftEncode always returns max 9 bytes + buf := make([]byte, 0, 9+len(input)+w) + buf = append(buf, leftEncode(uint64(w))...) + buf = append(buf, input...) + padlen := w - (len(buf) % w) + return append(buf, make([]byte, padlen)...) +} + +func leftEncode(value uint64) []byte { + var b [9]byte + binary.BigEndian.PutUint64(b[1:], value) + // Trim all but last leading zero bytes + i := byte(1) + for i < 8 && b[i] == 0 { + i++ + } + // Prepend number of encoded bytes + b[i-1] = 9 - i + return b[i-1:] +} + +func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { + c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} + + // leftEncode returns max 9 bytes + c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, N...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, S...) + c.Write(bytepad(c.initBlock, c.rate)) + return &c +} + +// Reset resets the hash to initial state. +func (c *cshakeState) Reset() { + c.state.Reset() + c.Write(bytepad(c.initBlock, c.rate)) +} + +// Clone returns copy of a cSHAKE context within its current state. +func (c *cshakeState) Clone() ShakeHash { + b := make([]byte, len(c.initBlock)) + copy(b, c.initBlock) + return &cshakeState{state: c.clone(), initBlock: b} +} + +// Clone returns copy of SHAKE context within its current state. +func (c *state) Clone() ShakeHash { + return c.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() ShakeHash { + return newShake128() +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() ShakeHash { + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { + return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} +} + +// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, +// a customizable variant of SHAKE128. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake128. +func NewCShake128(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake128() + } + return newCShake(N, S, rate128, 32, dsbyteCShake) +} + +// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, +// a customizable variant of SHAKE256. +// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is +// desired. S is a customization byte string used for domain separation - two cSHAKE +// computations on same input with different S yield unrelated outputs. +// When N and S are both empty, this is equivalent to NewShake256. 
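A hypothetical usage sketch of the SHAKE and cSHAKE constructors defined in this file, assuming the vendored module path golang.org/x/crypto/sha3; the input strings are placeholders:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Variable-length output from SHAKE128: Write absorbs, Read squeezes.
	h := sha3.NewShake128()
	h.Write([]byte("example input"))
	out := make([]byte, 32)
	h.Read(out)
	fmt.Printf("SHAKE128:  %x\n", out)

	// cSHAKE256 with a customization string S for domain separation;
	// a different S yields an unrelated digest for the same input.
	c := sha3.NewCShake256(nil, []byte("example domain"))
	c.Write([]byte("example input"))
	d := make([]byte, 64)
	c.Read(d)
	fmt.Printf("cSHAKE256: %x\n", d)
}
```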
+func NewCShake256(N, S []byte) ShakeHash { + if len(N) == 0 && len(S) == 0 { + return NewShake256() + } + return newCShake(N, S, rate256, 64, dsbyteCShake) +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + h.Write(data) + h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. +func ShakeSum256(hash, data []byte) { + h := NewShake256() + h.Write(data) + h.Read(hash) +} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 000000000..4276ba4ab --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go new file mode 100644 index 000000000..6ada5c957 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -0,0 +1,40 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +import ( + "crypto/subtle" + "encoding/binary" + "unsafe" + + "golang.org/x/sys/cpu" +) + +// xorIn xors the bytes in buf into the state. +func xorIn(d *state, buf []byte) { + if cpu.IsBigEndian { + for i := 0; len(buf) >= 8; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + subtle.XORBytes(ab[:], ab[:], buf) + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *state, b []byte) { + if cpu.IsBigEndian { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } + } else { + ab := (*[25 * 64 / 8]byte)(unsafe.Pointer(&d.a)) + copy(b, ab[:]) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go deleted file mode 100644 index a4d1919a9..000000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Deprecated: this package moved to golang.org/x/term. -package terminal - -import ( - "io" - - "golang.org/x/term" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes = term.EscapeCodes - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. -type Terminal = term.Terminal - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). 
-func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return term.NewTerminal(c, prompt) -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = term.ErrPasteIndicator - -// State contains the state of a terminal. -type State = term.State - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return term.IsTerminal(fd) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - return term.ReadPassword(fd) -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - return term.MakeRaw(fd) -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, oldState *State) error { - return term.Restore(fd, oldState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - return term.GetState(fd) -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - return term.GetSize(fd) -} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfdb..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. 
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e852..9b4de9401 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e..105c3b279 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } // Also close the connection after any CONTINUATION frame following an @@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df2818..003e649f3 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ce2e8b40e..6c349f3ec 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. 
+ group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -732,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -815,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. 
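The server changes above route all timer creation through Server.newTimer/afterFunc so tests can substitute a synthetic synctestGroup for real time. A generic, standalone sketch of that injectable-timer pattern; the names here (clock, realClock) are illustrative and not taken from the vendored package:

```go
package main

import (
	"fmt"
	"time"
)

// clock abstracts the time calls a server needs, so tests can swap in a
// synthetic implementation while production code uses the real one.
type clock interface {
	Now() time.Time
	AfterFunc(d time.Duration, f func()) *time.Timer
}

type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }
func (realClock) AfterFunc(d time.Duration, f func()) *time.Timer {
	return time.AfterFunc(d, f)
}

func main() {
	var c clock = realClock{}
	t := c.AfterFunc(10*time.Millisecond, func() { fmt.Println("timer fired") })
	defer t.Stop()
	time.Sleep(20 * time.Millisecond)
	fmt.Println("now:", c.Now().Format(time.RFC3339))
}
```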
func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -847,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -926,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1061,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1429,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1482,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1638,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1660,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2020,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2118,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2342,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2638,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2760,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2776,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2786,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2802,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd16..000000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. -// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? 
-// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. -func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 000000000..0b1c17b81 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ce375c8c7..61f511f97 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. 
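The new timer.go above defines a small timer interface plus a timeTimer adapter so that production code always goes through a constructor and tests (via the test hooks) can substitute synthetic timers. A minimal sketch of that seam under assumed names (realTimer, newTimer); it is not the vendored implementation:

```go
// Sketch of the timer-interface pattern the upgraded http2 package uses:
// code asks a constructor for a timer, tests could swap in a fake.
// The names realTimer and newTimer here are illustrative.
package main

import (
	"fmt"
	"time"
)

type timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// realTimer adapts *time.Timer, mirroring the timeTimer adapter in the diff.
type realTimer struct{ *time.Timer }

func (t realTimer) C() <-chan time.Time { return t.Timer.C }

// newTimer is the seam: a test build could return a synthetic timer here.
func newTimer(d time.Duration) timer { return realTimer{time.NewTimer(d)} }

func main() {
	t := newTimer(10 * time.Millisecond)
	<-t.C()
	fmt.Println("timer fired")
}
```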
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t 
*Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -860,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -893,7 +856,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +870,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -936,7 +905,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1131,7 +1113,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1143,9 +1126,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1155,7 +1138,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1193,7 +1176,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1308,23 +1291,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1385,24 +1375,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1432,8 +1405,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1444,7 +1418,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. 
-func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1458,21 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1497,28 +1456,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1581,7 +1520,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1590,21 +1529,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1753,7 +1677,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2015,7 +1939,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2298,7 +2222,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2320,6 +2244,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
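These transport hunks (the response-header timeout above, and the read-idle health check in the readLoop hunk that follows) now create their timers through the Transport. For context, a hedged sketch of how a client enables that health-check path; the URL and durations are placeholders:

```go
// Illustrative configuration of the health-check path exercised by the
// readLoop hunk below: ReadIdleTimeout arms the afterFunc timer that calls
// healthCheck, and PingTimeout bounds the ping it sends. Values are arbitrary.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	t2, err := http2.ConfigureTransports(t1)
	if err != nil {
		log.Fatal(err)
	}
	t2.ReadIdleTimeout = 30 * time.Second // ping if the connection is idle this long
	t2.PingTimeout = 15 * time.Second     // close the connection if the ping gets no reply

	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println("status:", resp.Status)
}
```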
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2386,7 +2311,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2423,7 +2348,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3021,7 +2946,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3076,7 +3001,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3120,7 +3045,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3131,20 +3057,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c669..f6783339d 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
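The writesched_priority hunk above replaces `for k := n.kids; k != nil; k = k.next` with `for n.kids != nil`: setParent unlinks the child and splices it into the new parent's kid list, so after the call `k.next` no longer walks n's remaining children. A stand-in sketch of that pattern, assuming a simplified node type rather than the vendored priorityNode:

```go
// Stand-in sketch: node mirrors the shape of the priority tree's kid lists so
// the loop change above can be seen in isolation. It is not the vendored type.
package main

import "fmt"

type node struct {
	id           int
	parent, kids *node
	prev, next   *node
}

// setParent unlinks n from its current parent and pushes it onto the front of
// the new parent's kid list, like the real scheduler does.
func (n *node) setParent(parent *node) {
	if p := n.parent; p != nil {
		if n.prev == nil {
			p.kids = n.next
		} else {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
	}
	n.parent = parent
	if parent == nil {
		n.prev, n.next = nil, nil
		return
	}
	n.prev, n.next = nil, parent.kids
	if n.next != nil {
		n.next.prev = n
	}
	parent.kids = n
}

func main() {
	root := &node{id: 0}
	mid := &node{id: 1}
	mid.setParent(root)
	for i := 2; i <= 4; i++ {
		(&node{id: i}).setParent(mid)
	}

	// Fixed pattern: keep detaching the current head until the list is empty.
	// Advancing via k = k.next instead would follow the child's new siblings
	// under root and leave some of mid's children behind.
	for mid.kids != nil {
		mid.kids.setParent(root)
	}

	n := 0
	for k := root.kids; k != nil; k = k.next {
		n++
	}
	fmt.Println("nodes now parented to root:", n) // 4: mid plus its three former kids
}
```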
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e693..781770c20 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,7 +1,7 @@ # OAuth2 for Go +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -14,22 +14,27 @@ go get golang.org/x/oauth2 Or you can manually git clone the repository to `$(go env GOPATH)/src/golang.org/x/oauth2`. -See godoc for further documentation and examples. +See pkg.go.dev for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) +package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go new file mode 100644 index 000000000..9bc6cd7bc --- /dev/null +++ b/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package authhandler implements a TokenSource to support +// "three-legged OAuth 2.0" via a custom AuthorizationHandler. +package authhandler + +import ( + "context" + "errors" + + "golang.org/x/oauth2" +) + +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" +) + +// PKCEParams holds parameters to support PKCE. 
+type PKCEParams struct { + Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. + ChallengeMethod string // The encryption method (ex. S256). + Verifier string // The original, non-encrypted secret. +} + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts +// the user for OAuth consent at the specified auth code URL +// and returns an auth code and state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. +// +// The pkce parameter supports PKCE flow, which uses code challenge and code verifier +// to prevent CSRF attacks. A unique code challenge and code verifier should be generated +// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) +} + +// TokenSource returns an oauth2.TokenSource that fetches access tokens +// using 3-legged-OAuth flow. +// +// The provided context.Context is used for oauth2 Exchange operation. +// +// The provided oauth2.Config should be a full configuration containing AuthURL, +// TokenURL, and Scope. +// +// An environment-specific AuthorizationHandler is used to obtain user consent. +// +// Per the OAuth protocol, a unique "state" string should be specified here. +// This token source will verify that the "state" is identical in the request +// and response before exchanging the auth code for OAuth token to prevent CSRF +// attacks. +func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { + return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) +} + +type authHandlerSource struct { + ctx context.Context + config *oauth2.Config + authHandler AuthorizationHandler + state string + pkce *PKCEParams +} + +func (source authHandlerSource) Token() (*oauth2.Token, error) { + // Step 1: Obtain auth code. + var authCodeUrlOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { + authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), + oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} + } + url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) + code, state, err := source.authHandler(url) + if err != nil { + return nil, err + } + if state != source.state { + return nil, errors.New("state mismatch in 3-legged-OAuth flow") + } + + // Step 2: Exchange auth code for access token. + var exchangeOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Verifier != "" { + exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} + } + return source.config.Exchange(source.ctx, code, exchangeOptions...) 
+} diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
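Stepping back to the authhandler package added just above (before deviceauth.go): it adapts a user-driven consent step into an oauth2.TokenSource, optionally with PKCE. A hedged usage sketch; the endpoints, client ID, secret, and state value are placeholders:

```go
// Illustrative use of golang.org/x/oauth2/authhandler added above.
// The handler, endpoints, client credentials, and state are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/authhandler"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "example-client-id", // placeholder
		ClientSecret: "example-secret",    // placeholder
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/oauth2/auth",  // placeholder
			TokenURL: "https://example.com/oauth2/token", // placeholder
		},
	}

	// The handler prompts the user to visit authCodeURL and paste the code back.
	handler := func(authCodeURL string) (code, state string, err error) {
		fmt.Println("Open this URL and authorize:", authCodeURL)
		fmt.Print("Paste code: ")
		fmt.Scan(&code)
		return code, "my-state", nil // must match the state passed to TokenSource
	}

	ts := authhandler.TokenSource(context.Background(), conf, "my-state", handler)
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token acquired, expires:", tok.Expiry)
}
```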
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index feb1157b1..564920bd4 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -6,16 +6,13 @@ package google import ( "context" - "time" + "log" + "sync" "golang.org/x/oauth2" ) -// Set at init time by appengine_gen1.go. 
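The DeviceAuth/DeviceAccessToken pair above completes the RFC 8628 device authorization flow added in deviceauth.go. A hedged usage sketch; the endpoints, client ID, and scope are placeholders, not values from this patch:

```go
// Illustrative use of the RFC 8628 device flow added in deviceauth.go above.
// Endpoint URLs and the client ID are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "example-client-id", // placeholder
		Scopes:   []string{"openid"},
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://example.com/oauth2/device/code", // placeholder
			TokenURL:      "https://example.com/oauth2/token",       // placeholder
		},
	}
	ctx := context.Background()

	// Step 1: request a device code and show the user where to enter it.
	da, err := conf.DeviceAuth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Step 2: poll the token endpoint until the user approves or the code expires.
	tok, err := conf.DeviceAccessToken(ctx, da)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token acquired, expires:", tok.Expiry)
}
```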
If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string +var logOnce sync.Once // only spam about deprecation once // AppEngineTokenSource returns a token source that fetches tokens from either // the current application's service account or from the metadata server, @@ -23,8 +20,10 @@ var appengineAppIDFunc func(c context.Context) string // details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that // involves user accounts, see oauth2.Config instead. // -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the +// The current version of this library requires at least Go 1.17 to build, +// so first generation App Engine runtimes (<= Go 1.9) are unsupported. +// Previously, on first generation App Engine runtimes, AppEngineTokenSource +// returned a token source that fetches tokens issued to the // current App Engine application's service account. The provided context must have // come from appengine.NewContext. // @@ -34,5 +33,8 @@ var appengineAppIDFunc func(c context.Context) string // context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, // which DefaultTokenSource will use in this case) instead. func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") } diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index 83dacac32..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. 
-var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 04c2c2216..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. - -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index ad2c09236..df958359a 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -8,19 +8,30 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" "runtime" + "sync" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" +) + +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + defaultUniverseDomain = "googleapis.com" ) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). type Credentials struct { ProjectID string // may be empty TokenSource oauth2.TokenSource @@ -30,6 +41,64 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + // UniverseDomainProvider returns the default service domain for a given + // Cloud universe. Optional. + // + // On GCE, UniverseDomainProvider should return the universe domain value + // from Google Compute Engine (GCE)'s metadata server. 
See also [The attached service + // account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). + // If the GCE metadata server returns a 404 error, the default universe + // domain value should be returned. If the GCE metadata server returns an + // error other than 404, the error should be returned. + UniverseDomainProvider func() (string, error) + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". +// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. +// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return defaultUniverseDomain + } + return c.universeDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe. If present, UniverseDomainProvider will be invoked and its return +// value will be cached. +// +// The default value is "googleapis.com". +func (c *Credentials) GetUniverseDomain() (string, error) { + c.udMu.Lock() + defer c.udMu.Unlock() + if c.universeDomain == "" && c.UniverseDomainProvider != nil { + // On Google Compute Engine, an App Engine standard second generation + // runtime, or App Engine flexible, use an externally provided function + // to request the universe domain from the metadata server. + ud, err := c.UniverseDomainProvider() + if err != nil { + return "", err + } + c.universeDomain = ud + } + // If no UniverseDomainProvider (meaning not on Google Compute Engine), or + // in case of any (non-error) empty return value from + // UniverseDomainProvider, set the default universe domain. + if c.universeDomain == "" { + c.universeDomain = defaultUniverseDomain + } + return c.universeDomain, nil } // DefaultCredentials is the old name of Credentials. @@ -37,6 +106,53 @@ type Credentials struct { // Deprecated: use Credentials instead. type DefaultCredentials = Credentials +// CredentialsParams holds user supplied parameters that are used together +// with a credentials file for building a Credentials object. +type CredentialsParams struct { + // Scopes is the list OAuth scopes. Required. + // Example: https://www.googleapis.com/auth/cloud-platform + Scopes []string + + // Subject is the user email used for domain wide delegation (see + // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. + AuthHandler authhandler.AuthorizationHandler + + // State is a unique string used with AuthHandler. Required for 3LO flow. + State string + + // PKCE is used to support PKCE flow. Optional for 3LO flow. + PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. 
+ TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. + UniverseDomain string +} + +func (params CredentialsParams) deepCopy() CredentialsParams { + paramsCopy := params + paramsCopy.Scopes = make([]string, len(params.Scopes)) + copy(paramsCopy.Scopes, params.Scopes) + return paramsCopy +} + // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { @@ -58,26 +174,31 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc return creds.TokenSource, nil } -// FindDefaultCredentials searches for "Application Default Credentials". +// FindDefaultCredentialsWithParams searches for "Application Default Credentials". // // It looks for credentials in the following places, // preferring the first location found: // -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. -func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + // First, try the environment variable. 
const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scopes) + creds, err := readCredentialsFile(ctx, filename, params) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } @@ -86,57 +207,99 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &DefaultCredentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scopes...), - }, nil + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // Third, if we're on Google Compute Engine, an App Engine standard second generation runtime, // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() - return &DefaultCredentials{ - ProjectID: id, - TokenSource: ComputeTokenSource("", scopes...), + universeDomainProvider := func() (string, error) { + universeDomain, err := metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return defaultUniverseDomain, nil + } else { + return "", err + } + } + return universeDomain, nil + } + return &Credentials{ + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + UniverseDomainProvider: universeDomainProvider, + universeDomain: params.UniverseDomain, }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { +// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return FindDefaultCredentialsWithParams(ctx, params) +} + +// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), +// a Google Developers service account key file, a gcloud user credentials file (a.k.a. 
refresh +// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud +// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. + config, _ := ConfigFromJSON(jsonData, params.Scopes...) + if config != nil { + return &Credentials{ + ProjectID: "", + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), + JSON: jsonData, + }, nil + } + + // Otherwise, parse jsonData as one of the other supported credentials files. var f credentialsFile if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. + if f.Type == userCredentialsKey { + universeDomain = defaultUniverseDomain + } + + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ts = newErrWrappingTokenSource(ts) + return &Credentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } +// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return CredentialsFromJSONWithParams(ctx, jsonData, params) +} + func wellKnownFile() string { const f = "application_default_credentials.json" if runtime.GOOS == "windows" { @@ -145,10 +308,10 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { - b, err := ioutil.ReadFile(filename) +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { + b, err := os.ReadFile(filename) if err != nil { return nil, err } - return CredentialsFromJSON(ctx, b, scopes...) + return CredentialsFromJSONWithParams(ctx, b, params) } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index 73be62903..830d268c1 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -4,23 +4,29 @@ // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. // // A brief overview of the package follows. For more information, please read // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. 
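FindDefaultCredentialsWithParams and CredentialsFromJSONWithParams above take a CredentialsParams struct instead of a bare scope list, and the returned Credentials can resolve its universe domain lazily via the metadata server. A hedged sketch of that call path; only the cloud-platform scope is a real value, the rest is generic:

```go
// Illustrative use of the CredentialsParams-based API added in default.go above.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	params := google.CredentialsParams{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	creds, err := google.FindDefaultCredentialsWithParams(ctx, params)
	if err != nil {
		log.Fatal(err)
	}

	// GetUniverseDomain consults the GCE metadata server when available and
	// falls back to "googleapis.com".
	ud, err := creds.GetUniverseDomain()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", creds.ProjectID, "universe domain:", ud)

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.Expiry)
}
```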
+// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // -// OAuth2 Configs +// # OAuth2 Configs // // Two functions in this package return golang.org/x/oauth2.Config values from Google credential // data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // +// # Workload and Workforce Identity Federation // -// Credentials +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. +// +// # Credentials // // The Credentials type represents Google credentials, including Application Default // Credentials. @@ -29,6 +35,13 @@ // FindDefaultCredentials looks in some well-known places for a credentials file, and // will call AppEngineTokenSource or ComputeTokenSource as needed. // +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// // DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, // then use the credentials to construct an http.Client or an oauth2.TokenSource. // diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000..d84dd0047 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. +type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
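The new error.go wrapper surfaces retriable token-refresh failures as *AuthenticationError. A hedged sketch of how a caller might test for a transient failure follows; the credentials here are assumed to come from a JSON-based source, since only those token sources are wrapped by newErrWrappingTokenSource.

```go
package main

import (
	"context"
	"errors"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := creds.TokenSource.Token(); err != nil {
		var authErr *google.AuthenticationError
		// Temporary reports 408, 429, 500 and 503 responses, which a
		// caller may choose to retry with backoff.
		if errors.As(err, &authErr) && authErr.Temporary() {
			log.Println("transient auth failure, retry later:", err)
			return
		}
		log.Fatal("permanent auth failure: ", err)
	}
}
```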
+func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go new file mode 100644 index 000000000..ca27c2e98 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -0,0 +1,577 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "golang.org/x/oauth2" +) + +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. + SecretAccessKey string `json:"SecretAccessKey"` + // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional. + SessionToken string `json:"Token"` +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials *AwsSecurityCredentials +} + +// getenv aliases os.Getenv for testing +var getenv = os.Getenv + +const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" +) + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. 
+ headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. + var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) + } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +// SignRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. 
+func (rs *awsRequestSigner) SignRequest(req *http.Request) error { + signedRequest := cloneRequest(req) + timestamp := now() + + signedRequest.Header.Add("host", requestHost(req)) + + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) + + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) + + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type awsCredentialSource struct { + environmentID string + regionURL string + regionalCredVerificationURL string + credVerificationURL string + imdsv2SessionTokenURL string + targetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client + awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + supplierOptions SupplierOptions +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { + if cs.client == nil { + cs.client = oauth2.NewClient(cs.ctx, nil) + } + return cs.client.Do(req.WithContext(cs.ctx)) +} + +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func (cs awsCredentialSource) shouldUseMetadataServer() bool { + return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} + +func (cs awsCredentialSource) credentialSourceType() string { + if cs.awsSecurityCredentialsSupplier != nil { + return "programmatic" + } + return "aws" +} + +func (cs awsCredentialSource) subjectToken() (string, error) { + // Set Defaults + if cs.regionalCredVerificationURL == "" { + cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl + } + if cs.requestSigner == nil { + headers := make(map[string]string) + if cs.shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) + if err != nil { + return "", err + } + cs.region, err = cs.getRegion(headers) + if err != nil { + return "", err + } + + cs.requestSigner = &awsRequestSigner{ + RegionName: cs.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if cs.targetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.targetResource) + } + cs.requestSigner.SignRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.imdsv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) + } + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil + } + + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.regionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+ respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } + if canRetrieveSecurityCredentialFromEnvironment() { + return &AwsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SessionToken: getenv(awsSessionToken), + }, nil + } + + roleName, err := cs.getMetadataRoleName(headers) + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") + } + + return &credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) + if err != nil { + return result, err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - %s", string(respBody)) + } + + return string(respBody), nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 000000000..6c81a6872 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package externalaccount provides support for creating workload identity +federation and workforce identity federation token sources that can be +used to access Google Cloud resources from external identity providers. 
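Before the detailed package documentation below, here is a rough sketch of a file-sourced workload identity federation Config assembled in code; the audience, pool and provider names, and token path are illustrative placeholders, not values taken from this patch.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google/externalaccount"
)

func main() {
	ctx := context.Background()
	conf := externalaccount.Config{
		// Illustrative audience for a workload identity pool provider.
		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
		Scopes:           []string{"https://www.googleapis.com/auth/cloud-platform"},
		// File-sourced credentials: some sidecar process keeps this file
		// refreshed with a current OIDC token before it expires.
		CredentialSource: &externalaccount.CredentialSource{
			File:   "/var/run/secrets/oidc/token",
			Format: externalaccount.Format{Type: "text"},
		},
	}
	ts, err := externalaccount.NewTokenSource(ctx, conf)
	if err != nil {
		log.Fatalf("creating token source: %v", err)
	}
	_ = ts // hand to an API client, e.g. via option.WithTokenSource(ts)
}
```

The returned oauth2.TokenSource can then be passed to any Google API client, for example via option.WithTokenSource, as the package documentation notes.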
+ +# Workload Identity Federation + +Using workload identity federation, your application can access Google Cloud +resources from Amazon Web Services (AWS), Microsoft Azure or any identity +provider that supports OpenID Connect (OIDC) or SAML 2.0. +Traditionally, applications running outside Google Cloud have used service +account keys to access Google Cloud resources. Using identity federation, +you can allow your workload to impersonate a service account. +This lets you access Google Cloud resources directly, eliminating the +maintenance and security burden associated with service account keys. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml + +For OIDC and SAML providers, the library can retrieve tokens in fours ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user defined function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers, +or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. + +# Workforce Identity Federation + +Workforce identity federation lets you use an external identity provider (IdP) to +authenticate and authorize a workforce—a group of users, such as employees, partners, +and contractors—using IAM, so that the users can access Google Cloud services. 
+Workforce identity federation extends Google Cloud's identity capabilities to support +syncless, attribute-based single sign on. + +With workforce identity federation, your workforce can access Google Cloud resources +using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +Services (AD FS), Okta, and others. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml + +For workforce identity federation, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user supplied function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers. +This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource)) + +# Security considerations + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +*/ +package externalaccount + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +const ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +// now aliases time.Now for testing +var now = func() time.Time { + return time.Now().UTC() +} + +// Config stores the configuration for fetching tokens with external credentials. 
+type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Required. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec. + // Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + // Required. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. If not provided, will default to + // https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the + // default service domain googleapis.com unless UniverseDomain is set. + // Optional. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. Optional. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. If not provided, it will default to 3600. Optional. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using ClientId as username and ClientSecret as password. Optional. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. Optional. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or + // CredentialSource must be provided. Optional. + CredentialSource *CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project header which overrides the project associated with the credentials. Optional. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. Optional. + Scopes []string + // WorkforcePoolUserProject is the workforce pool user project number when the credential + // corresponds to a workforce pool and not a workload identity pool. + // The underlying principal must still have serviceusage.services.use IAM + // permission to use the project for billing/quota. Optional. + WorkforcePoolUserProject string + // SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + SubjectTokenSupplier SubjectTokenSupplier + // AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. 
+ AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. + UniverseDomain string +} + +var ( + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +func validateWorkforceAudience(input string) bool { + return validWorkforceAudiencePattern.MatchString(input) +} + +// NewTokenSource Returns an external account TokenSource using the provided external account config. +func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) { + if conf.Audience == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set") + } + if conf.SubjectTokenType == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set") + } + if conf.WorkforcePoolUserProject != "" { + valid := validateWorkforceAudience(conf.Audience) + if !valid { + return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials") + } + } + count := 0 + if conf.CredentialSource != nil { + count++ + } + if conf.SubjectTokenSupplier != nil { + count++ + } + if conf.AwsSecurityCredentialsSupplier != nil { + count++ + } + if count == 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + if count > 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + return conf.tokenSource(ctx, "https") +} + +// tokenSource is a private function that's directly called by some of the tests, +// because the unit test URLs are mocked, and would otherwise fail the +// validity check. +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts), nil + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, + } + return oauth2.ReuseTokenSource(nil, imp), nil +} + +// Subject token file types. +const ( + fileTypeText = "text" + fileTypeJSON = "json" +) + +// Format contains information needed to retireve a subject token for URL or File sourced credentials. +type Format struct { + // Type should be either "text" or "json". This determines whether the file or URL sourced credentials + // expect a simple text subject token or if the subject token will be contained in a JSON object. + // When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check + // for the subject token in the file or URL response. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. 
+type CredentialSource struct { + // File is the location for file sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + File string `json:"file"` + + // Url is the URL to call for URL sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + URL string `json:"url"` + // Headers are the headers to attach to the request for URL sourced credentials. + Headers map[string]string `json:"headers"` + + // Executable is the configuration object for executable sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + Executable *ExecutableConfig `json:"executable"` + + // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + EnvironmentID string `json:"environment_id"` + // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. + RegionURL string `json:"region_url"` + // RegionalCredVerificationURL is the AWS regional credential verification URL, will default to + // "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided." + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + // IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS. + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + // Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json". + Format Format `json:"format"` +} + +// ExecutableConfig contains information needed for executable sourced credentials. +type ExecutableConfig struct { + // Command is the the full command to run to retrieve the subject token. + // This can include arguments. Must be an absolute path for the program. Required. + Command string `json:"command"` + // TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional. + TimeoutMillis *int `json:"timeout_millis"` + // OutputFile is the absolute path to the output file where the executable will cache the response. + // If specified the auth libraries will first check this location before running the executable. Optional. + OutputFile string `json:"output_file"` +} + +// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token. +type SubjectTokenSupplier interface { + // SubjectToken should return a valid subject token or an error. + // The external account token source does not cache the returned subject token, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same subject token. + SubjectToken(ctx context.Context, options SupplierOptions) (string, error) +} + +// AWSSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to +// exchange for a GCP access token. +type AwsSecurityCredentialsSupplier interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, options SupplierOptions) (string, error) + // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. 
+ // The external account token source does not cache the returned security credentials, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. + AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) +} + +// SupplierOptions contains information about the requested subject token or AWS security credentials from the +// Google external account credential. +type SupplierOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // Subject token type is the requested subject token type for the external account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// tokenURL returns the default STS token endpoint with the configured universe +// domain. +func (c *Config) tokenURL() string { + if c.UniverseDomain == "" { + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1) +} + +// parse determines the type of CredentialSource needed. +func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + //set Defaults + if c.TokenURL == "" { + c.TokenURL = c.tokenURL() + } + supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType} + + if c.AwsSecurityCredentialsSupplier != nil { + awsCredSource := awsCredentialSource{ + awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier, + targetResource: c.Audience, + supplierOptions: supplierOptions, + ctx: ctx, + } + return awsCredSource, nil + } else if c.SubjectTokenSupplier != nil { + return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil + } else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion) + } + + awsCredSource := awsCredentialSource{ + environmentID: c.CredentialSource.EnvironmentID, + regionURL: c.CredentialSource.RegionURL, + regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + credVerificationURL: c.CredentialSource.URL, + targetResource: c.Audience, + ctx: ctx, + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return createExecutableCredential(ctx, c.CredentialSource.Executable, c) + } + return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source") +} + +type baseCredentialSource interface { + credentialSourceType() string + subjectToken() (string, error) +} + 
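To ground the supplier interfaces defined above, here is a minimal sketch of a programmatic AWS supplier wired into NewTokenSource; the audience and credential values are placeholders, and an OIDC/SAML SubjectTokenSupplier is wired up the same way through Config.SubjectTokenSupplier.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google/externalaccount"
)

// staticAwsSupplier implements externalaccount.AwsSecurityCredentialsSupplier.
// The token source does not cache what the supplier returns, so a real
// implementation should cache or refresh its own credentials.
type staticAwsSupplier struct{}

func (staticAwsSupplier) AwsRegion(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
	return "us-east-1", nil // placeholder region
}

func (staticAwsSupplier) AwsSecurityCredentials(ctx context.Context, opts externalaccount.SupplierOptions) (*externalaccount.AwsSecurityCredentials, error) {
	return &externalaccount.AwsSecurityCredentials{
		AccessKeyID:     "AKIA-placeholder",
		SecretAccessKey: "secret-placeholder",
	}, nil
}

func main() {
	ts, err := externalaccount.NewTokenSource(context.Background(), externalaccount.Config{
		Audience:                       "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/my-pool/providers/my-aws-provider",
		SubjectTokenType:               "urn:ietf:params:aws:token-type:aws4_request",
		Scopes:                         []string{"https://www.googleapis.com/auth/cloud-platform"},
		AwsSecurityCredentialsSupplier: staticAwsSupplier{},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = ts // hand to an API client, e.g. via option.WithTokenSource(ts)
}
```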
+// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. +type tokenSource struct { + ctx context.Context + conf *Config +} + +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + +// Token allows tokenSource to conform to the oauth2.TokenSource interface. +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsexchange.TokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { + options = map[string]interface{}{ + "userProject": conf.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service") + } + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go new file mode 100644 index 000000000..dca5681a4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go @@ -0,0 +1,313 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go new file mode 100644 index 000000000..33766b972 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
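To make the executable contract above concrete, here is a minimal sketch of a helper binary that an executable-sourced configuration could invoke; the field names mirror the executableResponse struct parsed above, and the token value is a placeholder. The host process must also opt in by setting GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1.

```go
package main

import (
	"encoding/json"
	"os"
	"time"
)

func main() {
	// The credential library runs the configured command and parses this JSON
	// from stdout; version, success, and token_type are required fields, and
	// id_token carries the subject token for the jwt/id_token token types.
	resp := map[string]interface{}{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "placeholder-oidc-token",
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		os.Exit(1)
	}
}
```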
+ +package externalaccount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" +) + +type fileCredentialSource struct { + File string + Format Format +} + +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + +func (cs fileCredentialSource) subjectToken() (string, error) { + tokenFile, err := os.Open(cs.File) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) + } + defer tokenFile.Close() + tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go new file mode 100644 index 000000000..1d5aad2e2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 000000000..6c1abdf2d --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go new file mode 100644 index 000000000..71a7184e0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "golang.org/x/oauth2" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format Format + ctx context.Context +} + +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(respBody), nil + case "": + return string(respBody), nil + 
default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 81de32b36..7b82e7a08 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -15,16 +15,23 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" "golang.org/x/oauth2/jwt" ) -// Endpoint is Google's OAuth 2.0 endpoint. +// Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://oauth2.googleapis.com/token" @@ -86,40 +93,69 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) } scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope), nil + return f.jwtConfig(scope, ""), nil } // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey + Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
ClientSecret string `json:"client_secret"` ClientID string `json:"client_id"` RefreshToken string `json:"refresh_token"` + + // External Account fields + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + TokenURLExternal string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` + Delegates []string `json:"delegates"` + CredentialSource externalaccount.CredentialSource `json:"credential_source"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + + // Service account impersonation + SourceCredentials *credentialsFile `json:"source_credentials"` } -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, PrivateKey: []byte(f.PrivateKey), PrivateKeyID: f.PrivateKeyID, Scopes: scopes, TokenURL: f.TokenURL, + Subject: subject, // This is the user email to impersonate + Audience: f.Audience, } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -127,20 +163,80 @@ func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { return cfg } -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { +func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { switch f.Type { case serviceAccountKey: - cfg := f.jwtConfig(scopes) + cfg := f.jwtConfig(params.Scopes, params.Subject) return cfg.TokenSource(ctx), nil case userCredentialsKey: cfg := &oauth2.Config{ ClientID: f.ClientID, ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, + Scopes: params.Scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: f.AuthURL, + TokenURL: f.TokenURL, + AuthStyle: oauth2.AuthStyleInParams, + }, + } + if cfg.Endpoint.AuthURL == "" { + cfg.Endpoint.AuthURL = Endpoint.AuthURL + } + if cfg.Endpoint.TokenURL == "" { + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil + case externalAccountKey: + cfg := &externalaccount.Config{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: &f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + } + return externalaccount.NewTokenSource(ctx, *cfg) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + 
Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) + case impersonatedServiceAccount: + if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + ts, err := f.SourceCredentials.tokenSource(ctx, params) + if err != nil { + return nil, err + } + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: f.ServiceAccountImpersonationURL, + Scopes: params.Scopes, + Ts: ts, + Delegates: f.Delegates, + } + return oauth2.ReuseTokenSource(nil, imp), nil case "": return nil, errors.New("missing 'type' field in credentials") default: @@ -156,7 +252,14 @@ func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oau // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any + // refreshes earlier are a waste of compute. + earlyExpirySecs := 225 * time.Second + return computeTokenSource(account, earlyExpirySecs, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 000000000..cb5820707 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. 
+ ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. + QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go new file mode 100644 index 000000000..6bc3af110 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. 
+type ImpersonateTokenSource struct { + // Ctx is the execution context of the impersonation process + // used to perform http call to the URL. Required + Ctx context.Context + // Ts is the source credential used to generate a token on the + // impersonated service account. Required. + Ts oauth2.TokenSource + + // URL is the endpoint to call to generate a token + // on behalf the service account. Required. + URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetimeString, + Scope: its.Scopes, + Delegates: its.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) + } + client := oauth2.NewClient(its.Ctx, its.Ts) + req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) + } + req = req.WithContext(its.Ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + TokenType: "Bearer", + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go new file mode 100644 index 000000000..ebd520eac --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "encoding/base64" + "net/http" + "net/url" + + "golang.org/x/oauth2" +) + +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. 
+type ClientAuthentication struct { + // AuthStyle can be either basic or request-body + AuthStyle oauth2.AuthStyle + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + + switch c.AuthStyle { + case oauth2.AuthStyleInHeader: // AuthStyleInHeader corresponds to basic authentication as defined in rfc7617#2 + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + case oauth2.AuthStyleInParams: // AuthStyleInParams corresponds to request-body authentication with ClientID and ClientSecret in the message body. + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + case oauth2.AuthStyleAutoDetect: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go new file mode 100644 index 000000000..1a0bebd15 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -0,0 +1,125 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2" +) + +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +// The first 4 fields are all mandatory. headers can be used to pass additional +// headers beyond the bare minimum required by the token exchange. options can +// be used to pass additional JSON-structured options to the remote server. 
+func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { + data := url.Values{} + data.Set("audience", request.Audience) + data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") + data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token") + data.Set("subject_token_type", request.SubjectTokenType) + data.Set("subject_token", request.SubjectToken) + data.Set("scope", strings.Join(request.Scope, " ")) + if options != nil { + opts, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + } + data.Set("options", string(opts)) + } + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) + authentication.InjectAuthentication(data, headers) + encodedData := data.Encode() + + req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) + } + req = req.WithContext(ctx) + for key, list := range headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Add("Content-Length", strconv.Itoa(len(encodedData))) + + resp, err := client.Do(req) + + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid response from Secure Token Server: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, err + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + var stsResp Response + err = json.Unmarshal(body, &stsResp) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) + + } + + return &stsResp, nil +} + +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// Response is used to decode the remote server response during an oauth2 token exchange. 
+type Response struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index b0fdb3a88..e89e6ae17 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -7,6 +7,7 @@ package google import ( "crypto/rsa" "fmt" + "strings" "time" "golang.org/x/oauth2" @@ -24,6 +25,28 @@ import ( // optimization supported by a few Google services. // Unless you know otherwise, you should use JWTConfigFromJSON instead. func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + cfg, err := JWTConfigFromJSON(jsonKey) if err != nil { return nil, fmt.Errorf("google: could not parse JSON key: %v", err) @@ -35,6 +58,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token ts := &jwtAccessTokenSource{ email: cfg.Email, audience: audience, + scopes: scopes, pk: pk, pkID: cfg.PrivateKeyID, } @@ -42,11 +66,13 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { email, audience string + scopes []string pk *rsa.PrivateKey pkID string } @@ -54,12 +80,14 @@ type jwtAccessTokenSource struct { func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { iat := time.Now() exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), } hdr := &jws.Header{ Algorithm: "RS256", diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf..14989beaf 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. 
If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c38696..e83ddeef0 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,9 +18,8 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -57,12 +56,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -111,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
-func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -185,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -218,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
@@ -229,7 +253,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } @@ -238,21 +262,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -267,8 +299,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -278,17 +316,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a63..b9db01ddf 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. 
type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271..95015648b 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c83..09f6a49b8 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -57,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -70,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -138,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. 
+// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -161,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -206,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, @@ -290,6 +300,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +317,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +392,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. + rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341..109997d77 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -49,9 +49,21 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +139,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } @@ -154,7 +171,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) @@ -165,14 +182,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index aa0d34f1e..90657915f 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -6,7 +6,7 @@ package oauth2 import ( "errors" - "io" + "log" "net/http" "sync" ) @@ -25,9 +25,6 @@ type Transport struct { // Base is the base RoundTripper used to make HTTP requests. // If nil, http.DefaultTransport is used. Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an @@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { req2 := cloneRequest(req) // per RoundTripper contract token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - // req.Body is assumed to have been closed by the base RoundTripper. + // req.Body is assumed to be closed by the base RoundTripper. 
reqBodyClosed = true - - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil + return t.base().RoundTrip(req2) } -// CancelRequest cancels an in-flight request by closing its connection. +var cancelOnce sync.Once + +// CancelRequest does nothing. It used to be a legacy cancellation mechanism +// but now only it only logs on first use to warn that it's deprecated. +// +// Deprecated: use contexts for cancellation instead. func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } + cancelOnce.Do(func() { + log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") + }) } func (t *Transport) base() http.RoundTripper { @@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper { return http.DefaultTransport } -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { @@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request { } return r2 } - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 000000000..2a7cf70da --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..b618162aa --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,160 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. 
+func (s *Weighted) Acquire(ctx context.Context, n int64) error { + done := ctx.Done() + + s.mu.Lock() + select { + case <-done: + // ctx becoming done has "happened before" acquiring the semaphore, + // whether it became done before the call began or while we were + // waiting for the mutex. We prefer to fail even if we could acquire + // the mutex without blocking. + s.mu.Unlock() + return ctx.Err() + default: + } + if s.size-s.cur >= n && s.waiters.Len() == 0 { + // Since we hold s.mu and haven't synchronized since checking done, if + // ctx becomes done before we return here, it becoming done must have + // "happened concurrently" with this call - it cannot "happen before" + // we return in this branch. So, we're ok to always acquire here. + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-done + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-done: + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. + // Pretend we didn't and put the tokens back. + s.cur -= n + s.notifyWaiters() + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return ctx.Err() + + case <-ready: + // Acquired the semaphore. Check that ctx isn't already done. + // We check the done channel instead of calling ctx.Err because we + // already have the channel, and ctx.Err is O(n) with the nesting + // depth of ctx. + select { + case <-done: + s.Release(n) + return ctx.Err() + default: + } + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..269e173ca --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..271055be0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. +type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000..02609d5b2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,312 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. 
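The weighted semaphore vendored above arrives as a transitive dependency. For reference, a minimal usage sketch of Acquire/Release (the concurrency limit and the fake work loop are illustrative assumptions, not code from this patch):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	// Allow at most 3 units of work to run concurrently.
	sem := semaphore.NewWeighted(3)

	for i := 0; i < 10; i++ {
		// Acquire blocks until a slot is free or ctx is done.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			break
		}
		go func(n int) {
			defer sem.Release(1)
			fmt.Println("working on", n)
		}(i)
	}

	// Acquiring the full weight acts as a barrier: it only succeeds once all
	// outstanding workers have released their slots.
	if err := sem.Acquire(ctx, 3); err == nil {
		fmt.Println("all done")
	}
}
```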
+// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. 
+ HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
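Each per-architecture struct above is bracketed by CacheLinePad fields so that unrelated writes nearby cannot falsely share the cache line holding the feature flags. The exported pad can be reused for application data in the same way; a small sketch (the counter type is an assumption, not part of the vendored package):

```go
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/cpu"
)

// hotCounter keeps a frequently updated counter on its own cache line,
// mirroring how the cpu package pads its feature-flag structs.
type hotCounter struct {
	_ cpu.CacheLinePad
	n uint64
	_ cpu.CacheLinePad
}

func main() {
	var c hotCounter
	c.n = 42
	// The size reflects the two pads plus the counter itself.
	fmt.Println("value:", c.n, "size:", unsafe.Sizeof(c))
}
```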
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 000000000..9bf0c32eb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..301b752e9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. 
+const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..af2aa99f9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,194 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, + } +} + +func archInit() { + switch runtime.GOOS { + case "freebsd": + readARM64Registers() + case "linux", "netbsd", "openbsd": + doinit() + default: + // Many platforms don't seem to allow reading these registers. + setMinimalFeatures() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. +func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = 
true + + parseARM64SVERegister(getzfr0()) + } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..22cc99844 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..6ac6e1efb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..c8ae6ddc1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..910728fb1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
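parseARM64SystemRegisters above decodes 4-bit fields out of the ID_AA64* registers with extractBits. As a quick worked example (a standalone copy of the helper; the register value is made up), the AES field occupies bits 4-7 of ID_AA64ISAR0_EL1 and a value of 2 means both AES and PMULL are implemented:

```go
package main

import "fmt"

// extractBits mirrors the helper above: it returns the value of the
// inclusive bit range [start, end] of data.
func extractBits(data uint64, start, end uint) uint {
	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
	// Hypothetical ID_AA64ISAR0_EL1 with the AES field (bits 4-7) set to 2.
	var isar0 uint64 = 0x2 << 4
	aes := extractBits(isar0, 4, 7)
	fmt.Println("AES field:", aes, "-> AES:", aes >= 1, "PMULL:", aes >= 2)
}
```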
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 000000000..7f1946780 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 000000000..9526d2ce3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 000000000..3f73a05dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. 
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 000000000..99c60fe9f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..743eb5435 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..08f35ea17 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + + hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. 
+ // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..4686c1d54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..7d902b684 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..197188e67 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. 
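The Linux doinit functions above all reduce to testing individual bits of the HWCAP/HWCAP2 auxv words. A standalone illustration of that pattern with made-up capability words (in the real package the values come from /proc/self/auxv via readHWCAP):

```go
package main

import "fmt"

// A few arm64 HWCAP/HWCAP2 bits, copied from the constants above.
const (
	hwcapAES   = 1 << 3
	hwcapSHA2  = 1 << 6
	hwcapCRC32 = 1 << 7
	hwcap2SVE2 = 1 << 1
)

func isSet(hwc, bit uint) bool { return hwc&bit != 0 }

func main() {
	// Pretend the kernel reported these words in AT_HWCAP / AT_HWCAP2.
	var hwCap uint = hwcapAES | hwcapCRC32
	var hwCap2 uint = hwcap2SVE2

	fmt.Println("aes:  ", isSet(hwCap, hwcapAES))    // true
	fmt.Println("sha2: ", isSet(hwCap, hwcapSHA2))   // false
	fmt.Println("crc32:", isSet(hwCap, hwcapCRC32))  // true
	fmt.Println("sve2: ", isSet(hwCap2, hwcap2SVE2)) // true
}
```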
+const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 000000000..cb4a0c572 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. 
+// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000..558635850 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..fedb00cc4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go similarity index 54% rename from vendor/golang.org/x/oauth2/internal/client_appengine.go rename to vendor/golang.org/x/sys/cpu/cpu_mipsx.go index 743487188..ffb4ec7eb 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -2,12 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine +//go:build mips || mipsle -package internal +package cpu -import "google.golang.org/appengine/urlfetch" +const cacheLineSize = 32 -func init() { - appengineClientHook = urlfetch.Client -} +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. 
+ +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 000000000..85b64d5cc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000..054ba05d6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 000000000..e9ecf2a45 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 000000000..5341e7f88 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 000000000..5f8f2419a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000..89608fba2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000..5ab87808f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..c14f12b14 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..aca3199c9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. 
+func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..1fb4b7013 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff 
--git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..384787ea3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..c29f5e4c5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) 
+ X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + if runtime.GOOS == "darwin" { + // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. + // Since users can't rely on mask register contents, let's not advertise AVX-512 support. + // See issue 49233. + osSupportsAVX512 = false + } else { + // Check if OPMASK and ZMM registers have OS support. + osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + + buf, err := os.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. 
+ return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 000000000..762b63d68 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000..4cd64c704 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 000000000..5f92ac9a2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
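For orientation, the vendored cpu package above is consumed only through its exported feature flags; detection runs during package init via the per-GOOS/GOARCH doinit/archInit functions added in this patch. A minimal illustrative sketch of the caller side, not taken from autograph — the specific flags checked here are arbitrary examples:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// cpu's init has already run by the time main starts; Initialized reports
	// whether platform-specific detection (doinit/archInit) succeeded.
	if !cpu.Initialized {
		fmt.Println("no CPU feature detection on this platform; using baseline code paths")
		return
	}
	// Flags for architectures other than the one being run simply stay false,
	// so these checks compile and run everywhere.
	fmt.Println("x86 AVX2:", cpu.X86.HasAVX2)
	fmt.Println("ARM64 AES:", cpu.ARM64.HasAES)
}
```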
+ +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 000000000..4c9788ea8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..1b9ccb091 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..e8b6cdbe9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. 
+func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09eb..e14b766a3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200ad..099867dee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. +func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8..a6a2d2fc2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac177..d73c4652e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 
0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a1..4a55a4005 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3..1ec2b1407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533ef..24b346e1a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646ba..ebd213100 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL 
·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f69..824b9c2d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75d..4f178a229 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3..d003c3d43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74..0d45a941a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + 
Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3..51e13eb05 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee7..d002d8ef3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8..3f863d898 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acf..61c729310 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c790..b5d17414f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b90..9f2550dc3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc0414..ad05b51a6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + 
RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 000000000..fd8632444 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. 
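The package doc above already shows the read side of the registry API; for completeness, here is a small illustrative sketch of the write side (CreateKey, the Set*Value methods, DeleteKey), which appears later in this file. The key path and value names are placeholders invented for the example, not anything autograph touches:

```go
//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Create (or reopen) a scratch key under HKCU; the boolean reports
	// whether the key already existed.
	k, existed, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\example-scratch`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key already existed: %v", existed)

	if err := k.SetStringValue("greeting", "hello"); err != nil {
		log.Fatal(err)
	}
	if err := k.SetDWordValue("count", 42); err != nil {
		log.Fatal(err)
	}
	k.Close()

	// Remove the scratch key (and its values) again.
	if err := registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\example-scratch`); err != nil {
		log.Fatal(err)
	}
}
```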
+func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. 
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 000000000..bbf86ccf0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 000000000..f533091c1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 000000000..74db26b94 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
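A short illustrative sketch of the typed getters defined below and of the two sentinel errors (ErrNotExist, ErrUnexpectedType). `CurrentBuild` is assumed here to be a commonly present string value; it is only an example, not something autograph reads:

```go
//go:build windows

package main

import (
	"errors"
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// Typed getters also return the stored value type, so callers can tell
	// "missing" apart from "present but of another type".
	build, _, err := k.GetStringValue("CurrentBuild")
	switch {
	case errors.Is(err, registry.ErrNotExist):
		fmt.Println("value not present")
	case errors.Is(err, registry.ErrUnexpectedType):
		fmt.Println("value present, but not SZ/EXPAND_SZ")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("CurrentBuild =", build)
	}
}
```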
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 000000000..fc1835d8a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) 
(regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd17..5cee9a314 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d57..7b97a154c 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3e..4c2e1bdc0 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff 
--git a/vendor/golang.org/x/term/LICENSE b/vendor/golang.org/x/term/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/term/LICENSE +++ b/vendor/golang.org/x/term/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f56060..df6bf948e 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 000000000..2a7cf70da --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 000000000..8f6c7f493 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,430 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. 
+// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. +type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow reports whether an event may happen now. +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time t. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(math.MaxInt64) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. 
Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(t time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(t) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(t time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + t, tokens := r.lim.advance(t) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = t + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(t) { + r.lim.lastEvent = prevEvent + } + } +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
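// Illustrative sketch of the Wait path documented above; this is an editorial
// aside, not part of the upstream file. ctx, requests, and processRequest are
// assumed to be supplied by the caller:
//
//	lim := rate.NewLimiter(rate.Limit(10), 5) // refill 10 tokens/s, burst of 5
//	for _, req := range requests {
//		// Wait blocks until a token is available, or returns an error if the
//		// context is canceled or its deadline would pass before a token is granted.
//		if err := lim.Wait(ctx); err != nil {
//			return err
//		}
//		processRequest(req)
//	}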
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(t) + } + // Reserve + r := lim.reserveN(t, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(t) + if delay == 0 { + return nil + } + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing + select { + case <-ch: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: t, + } + } else if lim.limit == 0 { + var ok bool + if lim.burst >= n { + ok = true + lim.burst -= n + } + return Reservation{ + ok: ok, + lim: lim, + tokens: lim.burst, + timeToAct: t, + } + } + + t, tokens := lim.advance(t) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = t.Add(waitDuration) + + // Update state + lim.last = t + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. 
+// advance requires that lim.mu is held. +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { + last := lim.last + if t.Before(last) { + last = t + } + + // Calculate the new number of tokens, due to time that passed. + elapsed := t.Sub(last) + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + return t, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + if limit <= 0 { + return InfDuration + } + seconds := tokens / float64(limit) + return time.Duration(float64(time.Second) * seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + if limit <= 0 { + return 0 + } + return d.Seconds() * float64(limit) +} diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
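// Illustrative sketch of the Every filter described above; this is an
// editorial aside, not part of the upstream file. logf stands in for any
// lightweight action the caller wants to run occasionally:
//
//	var s rate.Sometimes
//	s.Every = 100 // run f on the 1st, 101st, 201st, ... call to Do
//	for i := 0; i < 1000; i++ {
//		s.Do(func() { logf("processed %d items so far", i) })
//	}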
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS index f73b72574..f07029059 100644 --- a/vendor/google.golang.org/api/AUTHORS +++ b/vendor/google.golang.org/api/AUTHORS @@ -8,3 +8,4 @@ # Please keep the list sorted. Google Inc. +LightStep Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS index fe55ebff0..788677b8f 100644 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -45,6 +45,7 @@ Jason Hall Johan Euphrosine Kostik Shtoyk Kunpei Sakai +Matthew Dolan Matthew Whisenhunt Michael McGreevy Nick Craig-Wood diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json deleted file mode 100644 index 0b97d8799..000000000 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ /dev/null @@ -1,2225 +0,0 @@ -{ - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloudkms": { - "description": "View and manage your keys and secrets stored in Cloud Key Management Service" - } - } - } - }, - "basePath": "", - "baseUrl": "https://cloudkms.googleapis.com/", - "batchPath": "batch", - "canonicalName": "Cloud KMS", - "description": "Manages keys and performs cryptographic operations in a central cloud service, for direct use by other cloud resources and applications.\n", - "discoveryVersion": "v1", - "documentationLink": "https://cloud.google.com/kms/", - "fullyEncodeReservedExpansion": true, - "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" - }, - "id": "cloudkms:v1", - "kind": "discovery#restDescription", - "name": "cloudkms", - "ownerDomain": "google.com", - "ownerName": "Google", - "parameters": { - "$.xgafv": { - "description": "V1 error format.", - "enum": [ - "1", - "2" - ], - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "type": "string" - }, - "access_token": { - "description": "OAuth access token.", - "location": "query", - "type": "string" - }, - "alt": { - "default": "json", - "description": "Data format for response.", - "enum": [ - "json", - "media", - "proto" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "type": "string" - }, - "callback": { - "description": "JSONP", - "location": "query", - "type": "string" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "location": "query", - "type": "string" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "location": "query", - "type": "string" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "location": "query", - "type": "string" - }, - "prettyPrint": { - "default": "true", - "description": "Returns response with indentations and line breaks.", - "location": "query", - "type": "boolean" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "location": "query", - "type": "string" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "location": "query", - "type": "string" - }, - "upload_protocol": { - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "location": "query", - "type": "string" - } - }, - "protocol": "rest", - "resources": { - "projects": { - "resources": { - "locations": { - "methods": { - "get": { - "description": "Gets information about a location.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Resource name for the location.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "Location" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "list": { - "description": "Lists information about the supported locations for this service.", - "flatPath": "v1/projects/{projectsId}/locations", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.list", - "parameterOrder": [ - "name" - ], - "parameters": { - "filter": { - "description": "The standard list filter.", - "location": "query", - "type": "string" - }, - "name": { - "description": "The resource that owns the locations collection, if applicable.", - "location": "path", - "pattern": "^projects/[^/]+$", - "required": true, - "type": "string" - }, - "pageSize": { - "description": "The standard list page size.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "The standard list page token.", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}/locations", - "response": { - "$ref": "ListLocationsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - } - }, - "resources": { - "keyRings": { - "methods": { - "create": { - "description": "Create a new KeyRing in a given Project and Location.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "keyRingId": { - "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. 
The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/keyRings", - "request": { - "$ref": "KeyRing" - }, - "response": { - "$ref": "KeyRing" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "get": { - "description": "Returns metadata for a given KeyRing.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the KeyRing to get.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "KeyRing" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "list": { - "description": "Lists KeyRings.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. Optional limit on the number of KeyRings to include in the\nresponse. 
Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/keyRings", - "response": { - "$ref": "ListKeyRingsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.setIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.testIamPermissions", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - } - }, - "resources": { - "cryptoKeys": { - "methods": { - "create": { - "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose and\nCryptoKey.version_template.algorithm\nare required.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "cryptoKeyId": { - "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - }, - "skipInitialVersionCreation": { - "description": "If set to true, the request will create a CryptoKey without any\nCryptoKeyVersions. You must manually call\nCreateCryptoKeyVersion or\nImportCryptoKeyVersion\nbefore you can use this CryptoKey.", - "location": "query", - "type": "boolean" - } - }, - "path": "v1/{+parent}/cryptoKeys", - "request": { - "$ref": "CryptoKey" - }, - "response": { - "$ref": "CryptoKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "decrypt": { - "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose\nmust be ENCRYPT_DECRYPT.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:decrypt", - "request": { - "$ref": "DecryptRequest" - }, - "response": { - "$ref": "DecryptResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "encrypt": { - "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.\nThe CryptoKey.purpose must be\nENCRYPT_DECRYPT.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:encrypt", - "request": { - "$ref": "EncryptRequest" - }, - "response": { - "$ref": "EncryptResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "get": { - "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the CryptoKey to get.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "CryptoKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. 
Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "list": { - "description": "Lists CryptoKeys.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - }, - "versionView": { - "description": "The fields of the primary version to include in the response.", - "enum": [ - "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", - "FULL" - ], - "location": "query", - "type": "string" - } - }, - "path": "v1/{+parent}/cryptoKeys", - "response": { - "$ref": "ListCryptoKeysResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "patch": { - "description": "Update a CryptoKey.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - "httpMethod": "PATCH", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Output only. 
The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - }, - "updateMask": { - "description": "Required. List of fields to be updated in this request.", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}", - "request": { - "$ref": "CryptoKey" - }, - "response": { - "$ref": "CryptoKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "updatePrimaryVersion": { - "description": "Update the version of a CryptoKey that will be used in Encrypt.\n\nReturns an error if called on an asymmetric key.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The resource name of the CryptoKey to update.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:updatePrimaryVersion", - "request": { - "$ref": "UpdateCryptoKeyPrimaryVersionRequest" - }, - "response": { - "$ref": "CryptoKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - } - }, - "resources": { - "cryptoKeyVersions": { - "methods": { - "asymmetricDecrypt": { - "description": "Decrypts data that was encrypted with a public key retrieved from\nGetPublicKey corresponding to a CryptoKeyVersion with\nCryptoKey.purpose ASYMMETRIC_DECRYPT.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricDecrypt", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKeyVersion to use for\ndecryption.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:asymmetricDecrypt", - "request": { - "$ref": "AsymmetricDecryptRequest" - }, - "response": { - "$ref": "AsymmetricDecryptResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "asymmetricSign": { - "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose\nASYMMETRIC_SIGN, producing a signature that can be verified with the public\nkey retrieved from GetPublicKey.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricSign", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKeyVersion to use for signing.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:asymmetricSign", - "request": { - "$ref": "AsymmetricSignRequest" - }, - "response": { - "$ref": "AsymmetricSignResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "create": { - "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. 
The name of the CryptoKey associated with\nthe CryptoKeyVersions.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/cryptoKeyVersions", - "request": { - "$ref": "CryptoKeyVersion" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "destroy": { - "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKeyVersion to destroy.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:destroy", - "request": { - "$ref": "DestroyCryptoKeyVersionRequest" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "get": { - "description": "Returns metadata for a given CryptoKeyVersion.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the CryptoKeyVersion to get.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "getPublicKey": { - "description": "Returns the public key for the given CryptoKeyVersion. The\nCryptoKey.purpose must be\nASYMMETRIC_SIGN or\nASYMMETRIC_DECRYPT.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}/publicKey", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The name of the CryptoKeyVersion public key to\nget.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}/publicKey", - "response": { - "$ref": "PublicKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "import": { - "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the\nwrapped key material provided in the request.\n\nThe version ID will be assigned the next sequential id within the\nCryptoKey.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions:import", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import", - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "description": "Required. The name of the CryptoKey to\nbe imported into.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/cryptoKeyVersions:import", - "request": { - "$ref": "ImportCryptoKeyVersionRequest" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "list": { - "description": "Lists CryptoKeyVersions.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. 
The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "required": true, - "type": "string" - }, - "view": { - "description": "The fields to include in the response.", - "enum": [ - "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", - "FULL" - ], - "location": "query", - "type": "string" - } - }, - "path": "v1/{+parent}/cryptoKeyVersions", - "response": { - "$ref": "ListCryptoKeyVersionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "patch": { - "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - "httpMethod": "PATCH", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - }, - "updateMask": { - "description": "Required. List of fields to be updated in this request.", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v1/{+name}", - "request": { - "$ref": "CryptoKeyVersion" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "restore": { - "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKeyVersion to restore.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}:restore", - "request": { - "$ref": "RestoreCryptoKeyVersionRequest" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - } - } - } - } - }, - "importJobs": { - "methods": { - "create": { - "description": "Create a new ImportJob within a KeyRing.\n\nImportJob.import_method is required.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.importJobs.create", - "parameterOrder": [ - "parent" - ], - "parameters": { - "importJobId": { - "description": "Required. 
It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The name of the KeyRing associated with the\nImportJobs.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/importJobs", - "request": { - "$ref": "ImportJob" - }, - "response": { - "$ref": "ImportJob" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "get": { - "description": "Returns metadata for a given ImportJob.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.importJobs.get", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the ImportJob to get.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+name}", - "response": { - "$ref": "ImportJob" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "getIamPolicy": { - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:getIamPolicy", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "options.requestedPolicyVersion": { - "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:getIamPolicy", - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "list": { - "description": "Lists ImportJobs.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", - "httpMethod": "GET", - "id": "cloudkms.projects.locations.keyRings.importJobs.list", - "parameterOrder": [ - "parent" - ], - "parameters": { - "filter": { - "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "orderBy": { - "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. 
For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - "location": "query", - "type": "string" - }, - "pageSize": { - "description": "Optional. Optional limit on the number of ImportJobs to include in the\nresponse. Further ImportJobs can subsequently be obtained by\nincluding the ListImportJobsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "format": "int32", - "location": "query", - "type": "integer" - }, - "pageToken": { - "description": "Optional. Optional pagination token, returned earlier via\nListImportJobsResponse.next_page_token.", - "location": "query", - "type": "string" - }, - "parent": { - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+parent}/importJobs", - "response": { - "$ref": "ListImportJobsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "setIamPolicy": { - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:setIamPolicy", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:setIamPolicy", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "response": { - "$ref": "Policy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:testIamPermissions", - "httpMethod": "POST", - "id": "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions", - "parameterOrder": [ - "resource" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "location": "path", - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - "required": true, - "type": "string" - } - }, - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms" - ] - } - } - } - } - } - } - } - } - } - }, - "revision": "20190926", - "rootUrl": "https://cloudkms.googleapis.com/", - "schemas": { - "AsymmetricDecryptRequest": { - "description": "Request message for KeyManagementService.AsymmetricDecrypt.", - "id": "AsymmetricDecryptRequest", - "properties": { - "ciphertext": { - "description": "Required. The data encrypted with the named CryptoKeyVersion's public\nkey using OAEP.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "AsymmetricDecryptResponse": { - "description": "Response message for KeyManagementService.AsymmetricDecrypt.", - "id": "AsymmetricDecryptResponse", - "properties": { - "plaintext": { - "description": "The decrypted data originally encrypted with the matching public key.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "AsymmetricSignRequest": { - "description": "Request message for KeyManagementService.AsymmetricSign.", - "id": "AsymmetricSignRequest", - "properties": { - "digest": { - "$ref": "Digest", - "description": "Required. The digest of the data to sign. The digest must be produced with\nthe same digest algorithm as specified by the key version's\nalgorithm." 
- } - }, - "type": "object" - }, - "AsymmetricSignResponse": { - "description": "Response message for KeyManagementService.AsymmetricSign.", - "id": "AsymmetricSignResponse", - "properties": { - "signature": { - "description": "The created signature.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", - "id": "AuditConfig", - "properties": { - "auditLogConfigs": { - "description": "The configuration for logging of each type of permission.", - "items": { - "$ref": "AuditLogConfig" - }, - "type": "array" - }, - "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", - "type": "string" - } - }, - "type": "object" - }, - "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", - "id": "AuditLogConfig", - "properties": { - "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", - "items": { - "type": "string" - }, - "type": "array" - }, - "logType": { - "description": "The log type that this config enables.", - "enum": [ - "LOG_TYPE_UNSPECIFIED", - "ADMIN_READ", - "DATA_WRITE", - "DATA_READ" - ], - "enumDescriptions": [ - "Default case. Should never be this.", - "Admin reads. Example: CloudIAM getIamPolicy", - "Data writes. Example: CloudSQL Users create", - "Data reads. Example: CloudSQL Users list" - ], - "type": "string" - } - }, - "type": "object" - }, - "Binding": { - "description": "Associates `members` with a `role`.", - "id": "Binding", - "properties": { - "condition": { - "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. 
Different bindings, including their conditions, are examined\nindependently." - }, - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@example.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.", - "type": "string" - } - }, - "type": "object" - }, - "CryptoKey": { - "description": "A CryptoKey represents a logical key that can be used for cryptographic\noperations.\n\nA CryptoKey is made up of one or more versions, which\nrepresent the actual key material used in cryptographic operations.", - "id": "CryptoKey", - "properties": { - "createTime": { - "description": "Output only. The time at which this CryptoKey was created.", - "format": "google-datetime", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Labels with user-defined metadata. For more information, see\n[Labeling Keys](/kms/docs/labeling-keys).", - "type": "object" - }, - "name": { - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "type": "string" - }, - "nextRotationTime": { - "description": "At next_rotation_time, the Key Management Service will automatically:\n\n1. Create a new version of this CryptoKey.\n2. Mark the new version as primary.\n\nKey rotations performed manually via\nCreateCryptoKeyVersion and\nUpdateCryptoKeyPrimaryVersion\ndo not affect next_rotation_time.\n\nKeys with purpose\nENCRYPT_DECRYPT support\nautomatic rotation. For other keys, this field must be omitted.", - "format": "google-datetime", - "type": "string" - }, - "primary": { - "$ref": "CryptoKeyVersion", - "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used\nby Encrypt when this CryptoKey is given\nin EncryptRequest.name.\n\nThe CryptoKey's primary version can be updated via\nUpdateCryptoKeyPrimaryVersion.\n\nAll keys with purpose\nENCRYPT_DECRYPT have a\nprimary. For other keys, this field will be omitted." - }, - "purpose": { - "description": "Immutable. The immutable purpose of this CryptoKey.", - "enum": [ - "CRYPTO_KEY_PURPOSE_UNSPECIFIED", - "ENCRYPT_DECRYPT", - "ASYMMETRIC_SIGN", - "ASYMMETRIC_DECRYPT" - ], - "enumDescriptions": [ - "Not specified.", - "CryptoKeys with this purpose may be used with\nEncrypt and\nDecrypt.", - "CryptoKeys with this purpose may be used with\nAsymmetricSign and\nGetPublicKey.", - "CryptoKeys with this purpose may be used with\nAsymmetricDecrypt and\nGetPublicKey." 
- ], - "type": "string" - }, - "rotationPeriod": { - "description": "next_rotation_time will be advanced by this period when the service\nautomatically rotates a key. Must be at least 24 hours and at most\n876,000 hours.\n\nIf rotation_period is set, next_rotation_time must also be set.\n\nKeys with purpose\nENCRYPT_DECRYPT support\nautomatic rotation. For other keys, this field must be omitted.", - "format": "google-duration", - "type": "string" - }, - "versionTemplate": { - "$ref": "CryptoKeyVersionTemplate", - "description": "A template describing settings for new CryptoKeyVersion instances.\nThe properties of new CryptoKeyVersion instances created by either\nCreateCryptoKeyVersion or\nauto-rotation are controlled by this template." - } - }, - "type": "object" - }, - "CryptoKeyVersion": { - "description": "A CryptoKeyVersion represents an individual cryptographic key, and the\nassociated key material.\n\nAn ENABLED version can be\nused for cryptographic operations.\n\nFor security reasons, the raw cryptographic key material represented by a\nCryptoKeyVersion can never be viewed or exported. It can only be used to\nencrypt, decrypt, or sign data when an authorized user or application invokes\nCloud KMS.", - "id": "CryptoKeyVersion", - "properties": { - "algorithm": { - "description": "Output only. The CryptoKeyVersionAlgorithm that this\nCryptoKeyVersion supports.", - "enum": [ - "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", - "GOOGLE_SYMMETRIC_ENCRYPTION", - "RSA_SIGN_PSS_2048_SHA256", - "RSA_SIGN_PSS_3072_SHA256", - "RSA_SIGN_PSS_4096_SHA256", - "RSA_SIGN_PSS_4096_SHA512", - "RSA_SIGN_PKCS1_2048_SHA256", - "RSA_SIGN_PKCS1_3072_SHA256", - "RSA_SIGN_PKCS1_4096_SHA256", - "RSA_SIGN_PKCS1_4096_SHA512", - "RSA_DECRYPT_OAEP_2048_SHA256", - "RSA_DECRYPT_OAEP_3072_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA512", - "EC_SIGN_P256_SHA256", - "EC_SIGN_P384_SHA384" - ], - "enumDescriptions": [ - "Not specified.", - "Creates symmetric encryption keys.", - "RSASSA-PSS 2048 bit key with a SHA256 digest.", - "RSASSA-PSS 3072 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA512 digest.", - "RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest.", - "RSAES-OAEP 2048 bit key with a SHA256 digest.", - "RSAES-OAEP 3072 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA512 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest." - ], - "type": "string" - }, - "attestation": { - "$ref": "KeyOperationAttestation", - "description": "Output only. Statement that was generated and signed by the HSM at key\ncreation time. Use this statement to verify attributes of the key as stored\non the HSM, independently of Google. Only provided for key versions with\nprotection_level HSM." - }, - "createTime": { - "description": "Output only. The time at which this CryptoKeyVersion was created.", - "format": "google-datetime", - "type": "string" - }, - "destroyEventTime": { - "description": "Output only. The time this CryptoKeyVersion's key material was\ndestroyed. Only present if state is\nDESTROYED.", - "format": "google-datetime", - "type": "string" - }, - "destroyTime": { - "description": "Output only. 
The time this CryptoKeyVersion's key material is scheduled\nfor destruction. Only present if state is\nDESTROY_SCHEDULED.", - "format": "google-datetime", - "type": "string" - }, - "generateTime": { - "description": "Output only. The time this CryptoKeyVersion's key material was\ngenerated.", - "format": "google-datetime", - "type": "string" - }, - "importFailureReason": { - "description": "Output only. The root cause of an import failure. Only present if\nstate is\nIMPORT_FAILED.", - "type": "string" - }, - "importJob": { - "description": "Output only. The name of the ImportJob used to import this\nCryptoKeyVersion. Only present if the underlying key material was\nimported.", - "type": "string" - }, - "importTime": { - "description": "Output only. The time at which this CryptoKeyVersion's key material\nwas imported.", - "format": "google-datetime", - "type": "string" - }, - "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - "type": "string" - }, - "protectionLevel": { - "description": "Output only. The ProtectionLevel describing how crypto operations are\nperformed with this CryptoKeyVersion.", - "enum": [ - "PROTECTION_LEVEL_UNSPECIFIED", - "SOFTWARE", - "HSM" - ], - "enumDescriptions": [ - "Not specified.", - "Crypto operations are performed in software.", - "Crypto operations are performed in a Hardware Security Module." - ], - "type": "string" - }, - "state": { - "description": "The current state of the CryptoKeyVersion.", - "enum": [ - "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", - "PENDING_GENERATION", - "ENABLED", - "DISABLED", - "DESTROYED", - "DESTROY_SCHEDULED", - "PENDING_IMPORT", - "IMPORT_FAILED" - ], - "enumDescriptions": [ - "Not specified.", - "This version is still being generated. It may not be used, enabled,\ndisabled, or destroyed yet. Cloud KMS will automatically mark this\nversion ENABLED as soon as the version is ready.", - "This version may be used for cryptographic operations.", - "This version may not be used, but the key material is still available,\nand the version can be placed back into the ENABLED state.", - "This version is destroyed, and the key material is no longer stored.\nA version may not leave this state once entered.", - "This version is scheduled for destruction, and will be destroyed soon.\nCall\nRestoreCryptoKeyVersion\nto put it back into the DISABLED state.", - "This version is still being imported. It may not be used, enabled,\ndisabled, or destroyed yet. Cloud KMS will automatically mark this\nversion ENABLED as soon as the version is ready.", - "This version was not imported successfully. It may not be used, enabled,\ndisabled, or destroyed. The submitted key material has been discarded.\nAdditional details can be found in\nCryptoKeyVersion.import_failure_reason." - ], - "type": "string" - } - }, - "type": "object" - }, - "CryptoKeyVersionTemplate": { - "description": "A CryptoKeyVersionTemplate specifies the properties to use when creating\na new CryptoKeyVersion, either manually with\nCreateCryptoKeyVersion or\nautomatically as a result of auto-rotation.", - "id": "CryptoKeyVersionTemplate", - "properties": { - "algorithm": { - "description": "Required. 
Algorithm to use\nwhen creating a CryptoKeyVersion based on this template.\n\nFor backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied if both\nthis field is omitted and CryptoKey.purpose is\nENCRYPT_DECRYPT.", - "enum": [ - "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", - "GOOGLE_SYMMETRIC_ENCRYPTION", - "RSA_SIGN_PSS_2048_SHA256", - "RSA_SIGN_PSS_3072_SHA256", - "RSA_SIGN_PSS_4096_SHA256", - "RSA_SIGN_PSS_4096_SHA512", - "RSA_SIGN_PKCS1_2048_SHA256", - "RSA_SIGN_PKCS1_3072_SHA256", - "RSA_SIGN_PKCS1_4096_SHA256", - "RSA_SIGN_PKCS1_4096_SHA512", - "RSA_DECRYPT_OAEP_2048_SHA256", - "RSA_DECRYPT_OAEP_3072_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA512", - "EC_SIGN_P256_SHA256", - "EC_SIGN_P384_SHA384" - ], - "enumDescriptions": [ - "Not specified.", - "Creates symmetric encryption keys.", - "RSASSA-PSS 2048 bit key with a SHA256 digest.", - "RSASSA-PSS 3072 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA512 digest.", - "RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest.", - "RSAES-OAEP 2048 bit key with a SHA256 digest.", - "RSAES-OAEP 3072 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA512 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest." - ], - "type": "string" - }, - "protectionLevel": { - "description": "ProtectionLevel to use when creating a CryptoKeyVersion based on\nthis template. Immutable. Defaults to SOFTWARE.", - "enum": [ - "PROTECTION_LEVEL_UNSPECIFIED", - "SOFTWARE", - "HSM" - ], - "enumDescriptions": [ - "Not specified.", - "Crypto operations are performed in software.", - "Crypto operations are performed in a Hardware Security Module." - ], - "type": "string" - } - }, - "type": "object" - }, - "DecryptRequest": { - "description": "Request message for KeyManagementService.Decrypt.", - "id": "DecryptRequest", - "properties": { - "additionalAuthenticatedData": { - "description": "Optional. Optional data that must match the data originally supplied in\nEncryptRequest.additional_authenticated_data.", - "format": "byte", - "type": "string" - }, - "ciphertext": { - "description": "Required. 
The encrypted data originally returned in\nEncryptResponse.ciphertext.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "DecryptResponse": { - "description": "Response message for KeyManagementService.Decrypt.", - "id": "DecryptResponse", - "properties": { - "plaintext": { - "description": "The decrypted data originally supplied in EncryptRequest.plaintext.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "DestroyCryptoKeyVersionRequest": { - "description": "Request message for KeyManagementService.DestroyCryptoKeyVersion.", - "id": "DestroyCryptoKeyVersionRequest", - "properties": {}, - "type": "object" - }, - "Digest": { - "description": "A Digest holds a cryptographic message digest.", - "id": "Digest", - "properties": { - "sha256": { - "description": "A message digest produced with the SHA-256 algorithm.", - "format": "byte", - "type": "string" - }, - "sha384": { - "description": "A message digest produced with the SHA-384 algorithm.", - "format": "byte", - "type": "string" - }, - "sha512": { - "description": "A message digest produced with the SHA-512 algorithm.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "EncryptRequest": { - "description": "Request message for KeyManagementService.Encrypt.", - "id": "EncryptRequest", - "properties": { - "additionalAuthenticatedData": { - "description": "Optional. Optional data that, if specified, must also be provided during decryption\nthrough DecryptRequest.additional_authenticated_data.\n\nThe maximum size depends on the key version's\nprotection_level. For\nSOFTWARE keys, the AAD must be no larger than\n64KiB. For HSM keys, the combined length of the\nplaintext and additional_authenticated_data fields must be no larger than\n8KiB.", - "format": "byte", - "type": "string" - }, - "plaintext": { - "description": "Required. The data to encrypt. Must be no larger than 64KiB.\n\nThe maximum size depends on the key version's\nprotection_level. For\nSOFTWARE keys, the plaintext must be no larger\nthan 64KiB. For HSM keys, the combined length of the\nplaintext and additional_authenticated_data fields must be no larger than\n8KiB.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "EncryptResponse": { - "description": "Response message for KeyManagementService.Encrypt.", - "id": "EncryptResponse", - "properties": { - "ciphertext": { - "description": "The encrypted data.", - "format": "byte", - "type": "string" - }, - "name": { - "description": "The resource name of the CryptoKeyVersion used in encryption. Check\nthis field to verify that the intended resource was used for encryption.", - "type": "string" - } - }, - "type": "object" - }, - "Expr": { - "description": "Represents an expression text. Example:\n\n title: \"User account presence\"\n description: \"Determines whether the request has a user account\"\n expression: \"size(request.user) \u003e 0\"", - "id": "Expr", - "properties": { - "description": { - "description": "An optional description of the expression. This is a longer text which\ndescribes the expression, e.g. 
when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in\nCommon Expression Language syntax.\n\nThe application context of the containing message determines which\nwell-known feature set of CEL is supported.", - "type": "string" - }, - "location": { - "description": "An optional string indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "An optional title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", - "type": "string" - } - }, - "type": "object" - }, - "ImportCryptoKeyVersionRequest": { - "description": "Request message for KeyManagementService.ImportCryptoKeyVersion.", - "id": "ImportCryptoKeyVersionRequest", - "properties": { - "algorithm": { - "description": "Required. The algorithm of\nthe key being imported. This does not need to match the\nversion_template of the CryptoKey this\nversion imports into.", - "enum": [ - "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", - "GOOGLE_SYMMETRIC_ENCRYPTION", - "RSA_SIGN_PSS_2048_SHA256", - "RSA_SIGN_PSS_3072_SHA256", - "RSA_SIGN_PSS_4096_SHA256", - "RSA_SIGN_PSS_4096_SHA512", - "RSA_SIGN_PKCS1_2048_SHA256", - "RSA_SIGN_PKCS1_3072_SHA256", - "RSA_SIGN_PKCS1_4096_SHA256", - "RSA_SIGN_PKCS1_4096_SHA512", - "RSA_DECRYPT_OAEP_2048_SHA256", - "RSA_DECRYPT_OAEP_3072_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA512", - "EC_SIGN_P256_SHA256", - "EC_SIGN_P384_SHA384" - ], - "enumDescriptions": [ - "Not specified.", - "Creates symmetric encryption keys.", - "RSASSA-PSS 2048 bit key with a SHA256 digest.", - "RSASSA-PSS 3072 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA512 digest.", - "RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest.", - "RSAES-OAEP 2048 bit key with a SHA256 digest.", - "RSAES-OAEP 3072 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA512 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest." - ], - "type": "string" - }, - "importJob": { - "description": "Required. The name of the ImportJob that was used to\nwrap this key material.", - "type": "string" - }, - "rsaAesWrappedKey": { - "description": "Wrapped key material produced with\nRSA_OAEP_3072_SHA1_AES_256\nor\nRSA_OAEP_4096_SHA1_AES_256.\n\nThis field contains the concatenation of two wrapped keys:\n\u003col\u003e\n \u003cli\u003eAn ephemeral AES-256 wrapping key wrapped with the\n public_key using RSAES-OAEP with SHA-1,\n MGF1 with SHA-1, and an empty label.\n \u003c/li\u003e\n \u003cli\u003eThe key to be imported, wrapped with the ephemeral AES-256 key\n using AES-KWP (RFC 5649).\n \u003c/li\u003e\n\u003c/ol\u003e\n\nIf importing symmetric key material, it is expected that the unwrapped\nkey contains plain bytes. 
If importing asymmetric key material, it is\nexpected that the unwrapped key is in PKCS#8-encoded DER format (the\nPrivateKeyInfo structure from RFC 5208).\n\nThis format is the same as the format produced by PKCS#11 mechanism\nCKM_RSA_AES_KEY_WRAP.", - "format": "byte", - "type": "string" - } - }, - "type": "object" - }, - "ImportJob": { - "description": "An ImportJob can be used to create CryptoKeys and\nCryptoKeyVersions using pre-existing key material,\ngenerated outside of Cloud KMS.\n\nWhen an ImportJob is created, Cloud KMS will generate a \"wrapping key\",\nwhich is a public/private key pair. You use the wrapping key to encrypt (also\nknown as wrap) the pre-existing key material to protect it during the import\nprocess. The nature of the wrapping key depends on the choice of\nimport_method. When the wrapping key generation\nis complete, the state will be set to\nACTIVE and the public_key\ncan be fetched. The fetched public key can then be used to wrap your\npre-existing key material.\n\nOnce the key material is wrapped, it can be imported into a new\nCryptoKeyVersion in an existing CryptoKey by calling\nImportCryptoKeyVersion.\nMultiple CryptoKeyVersions can be imported with a single\nImportJob. Cloud KMS uses the private key portion of the wrapping key to\nunwrap the key material. Only Cloud KMS has access to the private key.\n\nAn ImportJob expires 3 days after it is created. Once expired, Cloud KMS\nwill no longer be able to import or unwrap any key material that was wrapped\nwith the ImportJob's public key.\n\nFor more information, see\n[Importing a key](https://cloud.google.com/kms/docs/importing-a-key).", - "id": "ImportJob", - "properties": { - "attestation": { - "$ref": "KeyOperationAttestation", - "description": "Output only. Statement that was generated and signed by the key creator\n(for example, an HSM) at key creation time. Use this statement to verify\nattributes of the key as stored on the HSM, independently of Google.\nOnly present if the chosen ImportMethod is one with a protection\nlevel of HSM." - }, - "createTime": { - "description": "Output only. The time at which this ImportJob was created.", - "format": "google-datetime", - "type": "string" - }, - "expireEventTime": { - "description": "Output only. The time this ImportJob expired. Only present if\nstate is EXPIRED.", - "format": "google-datetime", - "type": "string" - }, - "expireTime": { - "description": "Output only. The time at which this ImportJob is scheduled for\nexpiration and can no longer be used to import key material.", - "format": "google-datetime", - "type": "string" - }, - "generateTime": { - "description": "Output only. The time this ImportJob's key material was generated.", - "format": "google-datetime", - "type": "string" - }, - "importMethod": { - "description": "Required. Immutable. The wrapping method to be used for incoming key material.", - "enum": [ - "IMPORT_METHOD_UNSPECIFIED", - "RSA_OAEP_3072_SHA1_AES_256", - "RSA_OAEP_4096_SHA1_AES_256" - ], - "enumDescriptions": [ - "Not specified.", - "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping\nscheme defined in the PKCS #11 standard. In summary, this involves\nwrapping the raw key with an ephemeral AES key, and wrapping the\nephemeral AES key with a 3072 bit RSA key. 
For more details, see\n[RSA AES key wrap\nmechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908).", - "This ImportMethod represents the CKM_RSA_AES_KEY_WRAP key wrapping\nscheme defined in the PKCS #11 standard. In summary, this involves\nwrapping the raw key with an ephemeral AES key, and wrapping the\nephemeral AES key with a 4096 bit RSA key. For more details, see\n[RSA AES key wrap\nmechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/cos01/pkcs11-curr-v2.40-cos01.html#_Toc408226908)." - ], - "type": "string" - }, - "name": { - "description": "Output only. The resource name for this ImportJob in the format\n`projects/*/locations/*/keyRings/*/importJobs/*`.", - "type": "string" - }, - "protectionLevel": { - "description": "Required. Immutable. The protection level of the ImportJob. This must match the\nprotection_level of the\nversion_template on the CryptoKey you\nattempt to import into.", - "enum": [ - "PROTECTION_LEVEL_UNSPECIFIED", - "SOFTWARE", - "HSM" - ], - "enumDescriptions": [ - "Not specified.", - "Crypto operations are performed in software.", - "Crypto operations are performed in a Hardware Security Module." - ], - "type": "string" - }, - "publicKey": { - "$ref": "WrappingPublicKey", - "description": "Output only. The public key with which to wrap key material prior to\nimport. Only returned if state is\nACTIVE." - }, - "state": { - "description": "Output only. The current state of the ImportJob, indicating if it can\nbe used.", - "enum": [ - "IMPORT_JOB_STATE_UNSPECIFIED", - "PENDING_GENERATION", - "ACTIVE", - "EXPIRED" - ], - "enumDescriptions": [ - "Not specified.", - "The wrapping key for this job is still being generated. It may not be\nused. Cloud KMS will automatically mark this job as\nACTIVE as soon as the wrapping key is generated.", - "This job may be used in\nCreateCryptoKey and\nCreateCryptoKeyVersion\nrequests.", - "This job can no longer be used and may not leave this state once entered." - ], - "type": "string" - } - }, - "type": "object" - }, - "KeyOperationAttestation": { - "description": "Contains an HSM-generated attestation about a key operation. For more\ninformation, see [Verifying attestations]\n(https://cloud.google.com/kms/docs/attest-key).", - "id": "KeyOperationAttestation", - "properties": { - "content": { - "description": "Output only. The attestation data provided by the HSM when the key\noperation was performed.", - "format": "byte", - "type": "string" - }, - "format": { - "description": "Output only. The format of the attestation data.", - "enum": [ - "ATTESTATION_FORMAT_UNSPECIFIED", - "CAVIUM_V1_COMPRESSED", - "CAVIUM_V2_COMPRESSED" - ], - "enumDescriptions": [ - "Not specified.", - "Cavium HSM attestation compressed with gzip. Note that this format is\ndefined by Cavium and subject to change at any time.", - "Cavium HSM attestation V2 compressed with gzip. This is a new format\nintroduced in Cavium's version 3.2-08." - ], - "type": "string" - } - }, - "type": "object" - }, - "KeyRing": { - "description": "A KeyRing is a toplevel logical grouping of CryptoKeys.", - "id": "KeyRing", - "properties": { - "createTime": { - "description": "Output only. The time at which this KeyRing was created.", - "format": "google-datetime", - "type": "string" - }, - "name": { - "description": "Output only. 
The resource name for the KeyRing in the format\n`projects/*/locations/*/keyRings/*`.", - "type": "string" - } - }, - "type": "object" - }, - "ListCryptoKeyVersionsResponse": { - "description": "Response message for KeyManagementService.ListCryptoKeyVersions.", - "id": "ListCryptoKeyVersionsResponse", - "properties": { - "cryptoKeyVersions": { - "description": "The list of CryptoKeyVersions.", - "items": { - "$ref": "CryptoKeyVersion" - }, - "type": "array" - }, - "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeyVersionsRequest.page_token to retrieve the next page of\nresults.", - "type": "string" - }, - "totalSize": { - "description": "The total number of CryptoKeyVersions that matched the\nquery.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ListCryptoKeysResponse": { - "description": "Response message for KeyManagementService.ListCryptoKeys.", - "id": "ListCryptoKeysResponse", - "properties": { - "cryptoKeys": { - "description": "The list of CryptoKeys.", - "items": { - "$ref": "CryptoKey" - }, - "type": "array" - }, - "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeysRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "description": "The total number of CryptoKeys that matched the query.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ListImportJobsResponse": { - "description": "Response message for KeyManagementService.ListImportJobs.", - "id": "ListImportJobsResponse", - "properties": { - "importJobs": { - "description": "The list of ImportJobs.", - "items": { - "$ref": "ImportJob" - }, - "type": "array" - }, - "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListImportJobsRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "description": "The total number of ImportJobs that matched the query.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ListKeyRingsResponse": { - "description": "Response message for KeyManagementService.ListKeyRings.", - "id": "ListKeyRingsResponse", - "properties": { - "keyRings": { - "description": "The list of KeyRings.", - "items": { - "$ref": "KeyRing" - }, - "type": "array" - }, - "nextPageToken": { - "description": "A token to retrieve next page of results. 
Pass this value in\nListKeyRingsRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "description": "The total number of KeyRings that matched the query.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "ListLocationsResponse": { - "description": "The response message for Locations.ListLocations.", - "id": "ListLocationsResponse", - "properties": { - "locations": { - "description": "A list of locations that matches the specified filter in the request.", - "items": { - "$ref": "Location" - }, - "type": "array" - }, - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - } - }, - "type": "object" - }, - "Location": { - "description": "A resource that represents Google Cloud Platform location.", - "id": "Location", - "properties": { - "displayName": { - "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", - "type": "string" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Cross-service attributes for the location. For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", - "type": "object" - }, - "locationId": { - "description": "The canonical id for this location. For example: `\"us-east1\"`.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", - "type": "object" - }, - "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", - "type": "string" - } - }, - "type": "object" - }, - "LocationMetadata": { - "description": "Cloud KMS metadata for the given google.cloud.location.Location.", - "id": "LocationMetadata", - "properties": { - "hsmAvailable": { - "description": "Indicates whether CryptoKeys with\nprotection_level\nHSM can be created in this location.", - "type": "boolean" - } - }, - "type": "object" - }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` is a collection of `bindings`. A `binding` binds one or more\n`members` to a single `role`. Members can be user accounts, service accounts,\nGoogle groups, and domains (such as G Suite). A `role` is a named list of\npermissions (defined by IAM or configured by users). 
A `binding` can\noptionally specify a `condition`, which is a logic expression that further\nconstrains the role binding based on attributes about the request and/or\ntarget resource.\n\n**JSON Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/resourcemanager.organizationAdmin\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-project-id@appspot.gserviceaccount.com\"\n ]\n },\n {\n \"role\": \"roles/resourcemanager.organizationViewer\",\n \"members\": [\"user:eve@example.com\"],\n \"condition\": {\n \"title\": \"expirable access\",\n \"description\": \"Does not grant access after Sep 2020\",\n \"expression\": \"request.time \u003c\n timestamp('2020-10-01T00:00:00.000Z')\",\n }\n }\n ]\n }\n\n**YAML Example**\n\n bindings:\n - members:\n - user:mike@example.com\n - group:admins@example.com\n - domain:google.com\n - serviceAccount:my-project-id@appspot.gserviceaccount.com\n role: roles/resourcemanager.organizationAdmin\n - members:\n - user:eve@example.com\n role: roles/resourcemanager.organizationViewer\n condition:\n title: expirable access\n description: Does not grant access after Sep 2020\n expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", - "id": "Policy", - "properties": { - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - }, - "type": "array" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`. Optionally may specify a\n`condition` that determines when binding is in effect.\n`bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - }, - "type": "array" - }, - "etag": { - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten. Due to blind-set semantics of an etag-less policy,\n'setIamPolicy' will not fail even if either of incoming or stored policy\ndoes not meet the version requirements.", - "format": "byte", - "type": "string" - }, - "version": { - "description": "Specifies the format of the policy.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nOperations affecting conditional bindings must specify version 3. This can\nbe either setting a conditional policy, modifying a conditional binding,\nor removing a conditional binding from the stored conditional policy.\nOperations on non-conditional policies may specify any valid value or\nleave the field unset.\n\nIf no etag is provided in the call to `setIamPolicy`, any version\ncompliance checks on the incoming and/or stored policy is skipped.", - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PublicKey": { - "description": "The public key for a given CryptoKeyVersion. 
Obtained via\nGetPublicKey.", - "id": "PublicKey", - "properties": { - "algorithm": { - "description": "The Algorithm associated\nwith this key.", - "enum": [ - "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED", - "GOOGLE_SYMMETRIC_ENCRYPTION", - "RSA_SIGN_PSS_2048_SHA256", - "RSA_SIGN_PSS_3072_SHA256", - "RSA_SIGN_PSS_4096_SHA256", - "RSA_SIGN_PSS_4096_SHA512", - "RSA_SIGN_PKCS1_2048_SHA256", - "RSA_SIGN_PKCS1_3072_SHA256", - "RSA_SIGN_PKCS1_4096_SHA256", - "RSA_SIGN_PKCS1_4096_SHA512", - "RSA_DECRYPT_OAEP_2048_SHA256", - "RSA_DECRYPT_OAEP_3072_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA256", - "RSA_DECRYPT_OAEP_4096_SHA512", - "EC_SIGN_P256_SHA256", - "EC_SIGN_P384_SHA384" - ], - "enumDescriptions": [ - "Not specified.", - "Creates symmetric encryption keys.", - "RSASSA-PSS 2048 bit key with a SHA256 digest.", - "RSASSA-PSS 3072 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA256 digest.", - "RSASSA-PSS 4096 bit key with a SHA512 digest.", - "RSASSA-PKCS1-v1_5 with a 2048 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 3072 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA256 digest.", - "RSASSA-PKCS1-v1_5 with a 4096 bit key and a SHA512 digest.", - "RSAES-OAEP 2048 bit key with a SHA256 digest.", - "RSAES-OAEP 3072 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA256 digest.", - "RSAES-OAEP 4096 bit key with a SHA512 digest.", - "ECDSA on the NIST P-256 curve with a SHA256 digest.", - "ECDSA on the NIST P-384 curve with a SHA384 digest." - ], - "type": "string" - }, - "pem": { - "description": "The public key, encoded in PEM format. For more information, see the\n[RFC 7468](https://tools.ietf.org/html/rfc7468) sections for\n[General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and\n[Textual Encoding of Subject Public Key Info]\n(https://tools.ietf.org/html/rfc7468#section-13).", - "type": "string" - } - }, - "type": "object" - }, - "RestoreCryptoKeyVersionRequest": { - "description": "Request message for KeyManagementService.RestoreCryptoKeyVersion.", - "id": "RestoreCryptoKeyVersionRequest", - "properties": {}, - "type": "object" - }, - "SetIamPolicyRequest": { - "description": "Request message for `SetIamPolicy` method.", - "id": "SetIamPolicyRequest", - "properties": { - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." - }, - "updateMask": { - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", - "format": "google-fieldmask", - "type": "string" - } - }, - "type": "object" - }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "id": "TestIamPermissionsRequest", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "id": "TestIamPermissionsResponse", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "UpdateCryptoKeyPrimaryVersionRequest": { - "description": "Request message for KeyManagementService.UpdateCryptoKeyPrimaryVersion.", - "id": "UpdateCryptoKeyPrimaryVersionRequest", - "properties": { - "cryptoKeyVersionId": { - "description": "Required. The id of the child CryptoKeyVersion to use as primary.", - "type": "string" - } - }, - "type": "object" - }, - "WrappingPublicKey": { - "description": "The public key component of the wrapping key. For details of the type of\nkey this public key corresponds to, see the ImportMethod.", - "id": "WrappingPublicKey", - "properties": { - "pem": { - "description": "The public key, encoded in PEM format. For more information, see the [RFC\n7468](https://tools.ietf.org/html/rfc7468) sections for [General\nConsiderations](https://tools.ietf.org/html/rfc7468#section-2) and\n[Textual Encoding of Subject Public Key Info]\n(https://tools.ietf.org/html/rfc7468#section-13).", - "type": "string" - } - }, - "type": "object" - } - }, - "servicePath": "", - "title": "Cloud Key Management Service (KMS) API", - "version": "v1", - "version_module": true -} \ No newline at end of file diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go deleted file mode 100644 index cf3e70ce0..000000000 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ /dev/null @@ -1,7819 +0,0 @@ -// Copyright 2019 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated file. DO NOT EDIT. - -// Package cloudkms provides access to the Cloud Key Management Service (KMS) API. -// -// This package is DEPRECATED. Use package cloud.google.com/go/kms/apiv1 instead. -// -// For product documentation, see: https://cloud.google.com/kms/ -// -// Creating a client -// -// Usage example: -// -// import "google.golang.org/api/cloudkms/v1" -// ... -// ctx := context.Background() -// cloudkmsService, err := cloudkms.NewService(ctx) -// -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. -// -// Other authentication options -// -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: -// -// cloudkmsService, err := cloudkms.NewService(ctx, option.WithScopes(cloudkms.CloudkmsScope)) -// -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: -// -// cloudkmsService, err := cloudkms.NewService(ctx, option.WithAPIKey("AIza...")) -// -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: -// -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) 
-// cloudkmsService, err := cloudkms.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) -// -// See https://godoc.org/google.golang.org/api/option/ for details on options. -package cloudkms // import "google.golang.org/api/cloudkms/v1" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - googleapi "google.golang.org/api/googleapi" - gensupport "google.golang.org/api/internal/gensupport" - option "google.golang.org/api/option" - htransport "google.golang.org/api/transport/http" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled - -const apiId = "cloudkms:v1" -const apiName = "cloudkms" -const apiVersion = "v1" -const basePath = "https://cloudkms.googleapis.com/" - -// OAuth2 scopes used by this API. -const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - - // View and manage your keys and secrets stored in Cloud Key Management - // Service - CloudkmsScope = "https://www.googleapis.com/auth/cloudkms" -) - -// NewService creates a new Service. -func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms", - ) - // NOTE: prepend, so we don't override user-specified scopes. - opts = append([]option.ClientOption{scopesOption}, opts...) - client, endpoint, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - s, err := New(client) - if err != nil { - return nil, err - } - if endpoint != "" { - s.BasePath = endpoint - } - return s, nil -} - -// New creates a new Service. It uses the provided http.Client for requests. -// -// Deprecated: please use NewService instead. -// To provide a custom HTTP client, use option.WithHTTPClient. -// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
-func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.Locations = NewProjectsLocationsService(s) - return rs -} - -type ProjectsService struct { - s *Service - - Locations *ProjectsLocationsService -} - -func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { - rs := &ProjectsLocationsService{s: s} - rs.KeyRings = NewProjectsLocationsKeyRingsService(s) - return rs -} - -type ProjectsLocationsService struct { - s *Service - - KeyRings *ProjectsLocationsKeyRingsService -} - -func NewProjectsLocationsKeyRingsService(s *Service) *ProjectsLocationsKeyRingsService { - rs := &ProjectsLocationsKeyRingsService{s: s} - rs.CryptoKeys = NewProjectsLocationsKeyRingsCryptoKeysService(s) - rs.ImportJobs = NewProjectsLocationsKeyRingsImportJobsService(s) - return rs -} - -type ProjectsLocationsKeyRingsService struct { - s *Service - - CryptoKeys *ProjectsLocationsKeyRingsCryptoKeysService - - ImportJobs *ProjectsLocationsKeyRingsImportJobsService -} - -func NewProjectsLocationsKeyRingsCryptoKeysService(s *Service) *ProjectsLocationsKeyRingsCryptoKeysService { - rs := &ProjectsLocationsKeyRingsCryptoKeysService{s: s} - rs.CryptoKeyVersions = NewProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService(s) - return rs -} - -type ProjectsLocationsKeyRingsCryptoKeysService struct { - s *Service - - CryptoKeyVersions *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService -} - -func NewProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService(s *Service) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService { - rs := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService{s: s} - return rs -} - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService struct { - s *Service -} - -func NewProjectsLocationsKeyRingsImportJobsService(s *Service) *ProjectsLocationsKeyRingsImportJobsService { - rs := &ProjectsLocationsKeyRingsImportJobsService{s: s} - return rs -} - -type ProjectsLocationsKeyRingsImportJobsService struct { - s *Service -} - -// AsymmetricDecryptRequest: Request message for -// KeyManagementService.AsymmetricDecrypt. -type AsymmetricDecryptRequest struct { - // Ciphertext: Required. The data encrypted with the named - // CryptoKeyVersion's public - // key using OAEP. - Ciphertext string `json:"ciphertext,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Ciphertext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Ciphertext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AsymmetricDecryptRequest) MarshalJSON() ([]byte, error) { - type NoMethod AsymmetricDecryptRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AsymmetricDecryptResponse: Response message for -// KeyManagementService.AsymmetricDecrypt. -type AsymmetricDecryptResponse struct { - // Plaintext: The decrypted data originally encrypted with the matching - // public key. - Plaintext string `json:"plaintext,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Plaintext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Plaintext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AsymmetricDecryptResponse) MarshalJSON() ([]byte, error) { - type NoMethod AsymmetricDecryptResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AsymmetricSignRequest: Request message for -// KeyManagementService.AsymmetricSign. -type AsymmetricSignRequest struct { - // Digest: Required. The digest of the data to sign. The digest must be - // produced with - // the same digest algorithm as specified by the key - // version's - // algorithm. - Digest *Digest `json:"digest,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Digest") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Digest") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *AsymmetricSignRequest) MarshalJSON() ([]byte, error) { - type NoMethod AsymmetricSignRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AsymmetricSignResponse: Response message for -// KeyManagementService.AsymmetricSign. -type AsymmetricSignResponse struct { - // Signature: The created signature. - Signature string `json:"signature,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Signature") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Signature") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AsymmetricSignResponse) MarshalJSON() ([]byte, error) { - type NoMethod AsymmetricSignResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, -// and -// aliya@example.com from DATA_WRITE logging. -type AuditConfig struct { - // AuditLogConfigs: The configuration for logging of each type of - // permission. - AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"AuditLogConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditLogConfigs") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AuditConfig) MarshalJSON() ([]byte, error) { - type NoMethod AuditConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// jose@example.com from DATA_READ logging. -type AuditLogConfig struct { - // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. - ExemptedMembers []string `json:"exemptedMembers,omitempty"` - - // LogType: The log type that this config enables. - // - // Possible values: - // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. - // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy - // "DATA_WRITE" - Data writes. Example: CloudSQL Users create - // "DATA_READ" - Data reads. Example: CloudSQL Users list - LogType string `json:"logType,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ExemptedMembers") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { - type NoMethod AuditLogConfig - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Binding: Associates `members` with a `role`. -type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: An unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. 
- Condition *Expr `json:"condition,omitempty"` - - // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@example.com` . - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // - Members []string `json:"members,omitempty"` - - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Condition") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Condition") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Binding) MarshalJSON() ([]byte, error) { - type NoMethod Binding - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CryptoKey: A CryptoKey represents a logical key that can be used for -// cryptographic -// operations. -// -// A CryptoKey is made up of one or more versions, which -// represent the actual key material used in cryptographic operations. -type CryptoKey struct { - // CreateTime: Output only. The time at which this CryptoKey was - // created. - CreateTime string `json:"createTime,omitempty"` - - // Labels: Labels with user-defined metadata. For more information, - // see - // [Labeling Keys](/kms/docs/labeling-keys). - Labels map[string]string `json:"labels,omitempty"` - - // Name: Output only. The resource name for this CryptoKey in the - // format - // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. - Name string `json:"name,omitempty"` - - // NextRotationTime: At next_rotation_time, the Key Management Service - // will automatically: - // - // 1. Create a new version of this CryptoKey. - // 2. Mark the new version as primary. - // - // Key rotations performed manually via - // CreateCryptoKeyVersion and - // UpdateCryptoKeyPrimaryVersion - // do not affect next_rotation_time. 
- // - // Keys with purpose - // ENCRYPT_DECRYPT support - // automatic rotation. For other keys, this field must be omitted. - NextRotationTime string `json:"nextRotationTime,omitempty"` - - // Primary: Output only. A copy of the "primary" CryptoKeyVersion that - // will be used - // by Encrypt when this CryptoKey is given - // in EncryptRequest.name. - // - // The CryptoKey's primary version can be updated - // via - // UpdateCryptoKeyPrimaryVersion. - // - // All keys with purpose - // ENCRYPT_DECRYPT have a - // primary. For other keys, this field will be omitted. - Primary *CryptoKeyVersion `json:"primary,omitempty"` - - // Purpose: Immutable. The immutable purpose of this CryptoKey. - // - // Possible values: - // "CRYPTO_KEY_PURPOSE_UNSPECIFIED" - Not specified. - // "ENCRYPT_DECRYPT" - CryptoKeys with this purpose may be used - // with - // Encrypt and - // Decrypt. - // "ASYMMETRIC_SIGN" - CryptoKeys with this purpose may be used - // with - // AsymmetricSign and - // GetPublicKey. - // "ASYMMETRIC_DECRYPT" - CryptoKeys with this purpose may be used - // with - // AsymmetricDecrypt and - // GetPublicKey. - Purpose string `json:"purpose,omitempty"` - - // RotationPeriod: next_rotation_time will be advanced by this period - // when the service - // automatically rotates a key. Must be at least 24 hours and at - // most - // 876,000 hours. - // - // If rotation_period is set, next_rotation_time must also be set. - // - // Keys with purpose - // ENCRYPT_DECRYPT support - // automatic rotation. For other keys, this field must be omitted. - RotationPeriod string `json:"rotationPeriod,omitempty"` - - // VersionTemplate: A template describing settings for new - // CryptoKeyVersion instances. - // The properties of new CryptoKeyVersion instances created by - // either - // CreateCryptoKeyVersion or - // auto-rotation are controlled by this template. - VersionTemplate *CryptoKeyVersionTemplate `json:"versionTemplate,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreateTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CryptoKey) MarshalJSON() ([]byte, error) { - type NoMethod CryptoKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CryptoKeyVersion: A CryptoKeyVersion represents an individual -// cryptographic key, and the -// associated key material. -// -// An ENABLED version can be -// used for cryptographic operations. -// -// For security reasons, the raw cryptographic key material represented -// by a -// CryptoKeyVersion can never be viewed or exported. 
It can only be used -// to -// encrypt, decrypt, or sign data when an authorized user or application -// invokes -// Cloud KMS. -type CryptoKeyVersion struct { - // Algorithm: Output only. The CryptoKeyVersionAlgorithm that - // this - // CryptoKeyVersion supports. - // - // Possible values: - // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. - // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. - // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA256" - RSASSA-PSS 4096 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA512" - RSASSA-PSS 4096 bit key with a SHA512 - // digest. - // "RSA_SIGN_PKCS1_2048_SHA256" - RSASSA-PKCS1-v1_5 with a 2048 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_3072_SHA256" - RSASSA-PKCS1-v1_5 with a 3072 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA256" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA512" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA512 digest. - // "RSA_DECRYPT_OAEP_2048_SHA256" - RSAES-OAEP 2048 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_3072_SHA256" - RSAES-OAEP 3072 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA256" - RSAES-OAEP 4096 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA512" - RSAES-OAEP 4096 bit key with a - // SHA512 digest. - // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. - // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. - Algorithm string `json:"algorithm,omitempty"` - - // Attestation: Output only. Statement that was generated and signed by - // the HSM at key - // creation time. Use this statement to verify attributes of the key as - // stored - // on the HSM, independently of Google. Only provided for key versions - // with - // protection_level HSM. - Attestation *KeyOperationAttestation `json:"attestation,omitempty"` - - // CreateTime: Output only. The time at which this CryptoKeyVersion was - // created. - CreateTime string `json:"createTime,omitempty"` - - // DestroyEventTime: Output only. The time this CryptoKeyVersion's key - // material was - // destroyed. Only present if state is - // DESTROYED. - DestroyEventTime string `json:"destroyEventTime,omitempty"` - - // DestroyTime: Output only. The time this CryptoKeyVersion's key - // material is scheduled - // for destruction. Only present if state is - // DESTROY_SCHEDULED. - DestroyTime string `json:"destroyTime,omitempty"` - - // GenerateTime: Output only. The time this CryptoKeyVersion's key - // material was - // generated. - GenerateTime string `json:"generateTime,omitempty"` - - // ImportFailureReason: Output only. The root cause of an import - // failure. Only present if - // state is - // IMPORT_FAILED. - ImportFailureReason string `json:"importFailureReason,omitempty"` - - // ImportJob: Output only. The name of the ImportJob used to import - // this - // CryptoKeyVersion. Only present if the underlying key material - // was - // imported. - ImportJob string `json:"importJob,omitempty"` - - // ImportTime: Output only. The time at which this CryptoKeyVersion's - // key material - // was imported. - ImportTime string `json:"importTime,omitempty"` - - // Name: Output only. 
The resource name for this CryptoKeyVersion in the - // format - // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersio - // ns/*`. - Name string `json:"name,omitempty"` - - // ProtectionLevel: Output only. The ProtectionLevel describing how - // crypto operations are - // performed with this CryptoKeyVersion. - // - // Possible values: - // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. - // "SOFTWARE" - Crypto operations are performed in software. - // "HSM" - Crypto operations are performed in a Hardware Security - // Module. - ProtectionLevel string `json:"protectionLevel,omitempty"` - - // State: The current state of the CryptoKeyVersion. - // - // Possible values: - // "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED" - Not specified. - // "PENDING_GENERATION" - This version is still being generated. It - // may not be used, enabled, - // disabled, or destroyed yet. Cloud KMS will automatically mark - // this - // version ENABLED as soon as the version is ready. - // "ENABLED" - This version may be used for cryptographic operations. - // "DISABLED" - This version may not be used, but the key material is - // still available, - // and the version can be placed back into the ENABLED state. - // "DESTROYED" - This version is destroyed, and the key material is no - // longer stored. - // A version may not leave this state once entered. - // "DESTROY_SCHEDULED" - This version is scheduled for destruction, - // and will be destroyed soon. - // Call - // RestoreCryptoKeyVersion - // to put it back into the DISABLED state. - // "PENDING_IMPORT" - This version is still being imported. It may not - // be used, enabled, - // disabled, or destroyed yet. Cloud KMS will automatically mark - // this - // version ENABLED as soon as the version is ready. - // "IMPORT_FAILED" - This version was not imported successfully. It - // may not be used, enabled, - // disabled, or destroyed. The submitted key material has been - // discarded. - // Additional details can be found - // in - // CryptoKeyVersion.import_failure_reason. - State string `json:"state,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Algorithm") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Algorithm") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *CryptoKeyVersion) MarshalJSON() ([]byte, error) { - type NoMethod CryptoKeyVersion - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CryptoKeyVersionTemplate: A CryptoKeyVersionTemplate specifies the -// properties to use when creating -// a new CryptoKeyVersion, either manually with -// CreateCryptoKeyVersion or -// automatically as a result of auto-rotation. -type CryptoKeyVersionTemplate struct { - // Algorithm: Required. Algorithm to use - // when creating a CryptoKeyVersion based on this template. - // - // For backwards compatibility, GOOGLE_SYMMETRIC_ENCRYPTION is implied - // if both - // this field is omitted and CryptoKey.purpose is - // ENCRYPT_DECRYPT. - // - // Possible values: - // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. - // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. - // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA256" - RSASSA-PSS 4096 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA512" - RSASSA-PSS 4096 bit key with a SHA512 - // digest. - // "RSA_SIGN_PKCS1_2048_SHA256" - RSASSA-PKCS1-v1_5 with a 2048 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_3072_SHA256" - RSASSA-PKCS1-v1_5 with a 3072 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA256" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA512" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA512 digest. - // "RSA_DECRYPT_OAEP_2048_SHA256" - RSAES-OAEP 2048 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_3072_SHA256" - RSAES-OAEP 3072 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA256" - RSAES-OAEP 4096 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA512" - RSAES-OAEP 4096 bit key with a - // SHA512 digest. - // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. - // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. - Algorithm string `json:"algorithm,omitempty"` - - // ProtectionLevel: ProtectionLevel to use when creating a - // CryptoKeyVersion based on - // this template. Immutable. Defaults to SOFTWARE. - // - // Possible values: - // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. - // "SOFTWARE" - Crypto operations are performed in software. - // "HSM" - Crypto operations are performed in a Hardware Security - // Module. - ProtectionLevel string `json:"protectionLevel,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Algorithm") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Algorithm") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. 
- // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CryptoKeyVersionTemplate) MarshalJSON() ([]byte, error) { - type NoMethod CryptoKeyVersionTemplate - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DecryptRequest: Request message for KeyManagementService.Decrypt. -type DecryptRequest struct { - // AdditionalAuthenticatedData: Optional. Optional data that must match - // the data originally supplied - // in - // EncryptRequest.additional_authenticated_data. - AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` - - // Ciphertext: Required. The encrypted data originally returned - // in - // EncryptResponse.ciphertext. - Ciphertext string `json:"ciphertext,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DecryptRequest) MarshalJSON() ([]byte, error) { - type NoMethod DecryptRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DecryptResponse: Response message for KeyManagementService.Decrypt. -type DecryptResponse struct { - // Plaintext: The decrypted data originally supplied in - // EncryptRequest.plaintext. - Plaintext string `json:"plaintext,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Plaintext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Plaintext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DecryptResponse) MarshalJSON() ([]byte, error) { - type NoMethod DecryptResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DestroyCryptoKeyVersionRequest: Request message for -// KeyManagementService.DestroyCryptoKeyVersion. 
-type DestroyCryptoKeyVersionRequest struct { -} - -// Digest: A Digest holds a cryptographic message digest. -type Digest struct { - // Sha256: A message digest produced with the SHA-256 algorithm. - Sha256 string `json:"sha256,omitempty"` - - // Sha384: A message digest produced with the SHA-384 algorithm. - Sha384 string `json:"sha384,omitempty"` - - // Sha512: A message digest produced with the SHA-512 algorithm. - Sha512 string `json:"sha512,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Sha256") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Sha256") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Digest) MarshalJSON() ([]byte, error) { - type NoMethod Digest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// EncryptRequest: Request message for KeyManagementService.Encrypt. -type EncryptRequest struct { - // AdditionalAuthenticatedData: Optional. Optional data that, if - // specified, must also be provided during decryption - // through DecryptRequest.additional_authenticated_data. - // - // The maximum size depends on the key version's - // protection_level. For - // SOFTWARE keys, the AAD must be no larger than - // 64KiB. For HSM keys, the combined length of the - // plaintext and additional_authenticated_data fields must be no larger - // than - // 8KiB. - AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` - - // Plaintext: Required. The data to encrypt. Must be no larger than - // 64KiB. - // - // The maximum size depends on the key version's - // protection_level. For - // SOFTWARE keys, the plaintext must be no larger - // than 64KiB. For HSM keys, the combined length of the - // plaintext and additional_authenticated_data fields must be no larger - // than - // 8KiB. - Plaintext string `json:"plaintext,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *EncryptRequest) MarshalJSON() ([]byte, error) { - type NoMethod EncryptRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// EncryptResponse: Response message for KeyManagementService.Encrypt. -type EncryptResponse struct { - // Ciphertext: The encrypted data. - Ciphertext string `json:"ciphertext,omitempty"` - - // Name: The resource name of the CryptoKeyVersion used in encryption. - // Check - // this field to verify that the intended resource was used for - // encryption. - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Ciphertext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Ciphertext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *EncryptResponse) MarshalJSON() ([]byte, error) { - type NoMethod EncryptResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Expr: Represents an expression text. Example: -// -// title: "User account presence" -// description: "Determines whether the request has a user account" -// expression: "size(request.user) > 0" -type Expr struct { - // Description: An optional description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. - Description string `json:"description,omitempty"` - - // Expression: Textual representation of an expression in - // Common Expression Language syntax. - // - // The application context of the containing message determines - // which - // well-known feature set of CEL is supported. - Expression string `json:"expression,omitempty"` - - // Location: An optional string indicating the location of the - // expression for error - // reporting, e.g. a file name and a position in the file. - Location string `json:"location,omitempty"` - - // Title: An optional title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. - Title string `json:"title,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Expr) MarshalJSON() ([]byte, error) { - type NoMethod Expr - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ImportCryptoKeyVersionRequest: Request message for -// KeyManagementService.ImportCryptoKeyVersion. -type ImportCryptoKeyVersionRequest struct { - // Algorithm: Required. The algorithm of - // the key being imported. This does not need to match - // the - // version_template of the CryptoKey this - // version imports into. - // - // Possible values: - // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. - // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. - // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA256" - RSASSA-PSS 4096 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA512" - RSASSA-PSS 4096 bit key with a SHA512 - // digest. - // "RSA_SIGN_PKCS1_2048_SHA256" - RSASSA-PKCS1-v1_5 with a 2048 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_3072_SHA256" - RSASSA-PKCS1-v1_5 with a 3072 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA256" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA512" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA512 digest. - // "RSA_DECRYPT_OAEP_2048_SHA256" - RSAES-OAEP 2048 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_3072_SHA256" - RSAES-OAEP 3072 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA256" - RSAES-OAEP 4096 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA512" - RSAES-OAEP 4096 bit key with a - // SHA512 digest. - // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. - // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. - Algorithm string `json:"algorithm,omitempty"` - - // ImportJob: Required. The name of the ImportJob that was used to - // wrap this key material. - ImportJob string `json:"importJob,omitempty"` - - // RsaAesWrappedKey: Wrapped key material produced - // with - // RSA_OAEP_3072_SHA1_AES_256 - // or - // RSA_OAEP_4096_SHA1_AES_256. - // - // This field contains the concatenation of two wrapped keys: - //
- // 1. An ephemeral AES-256 wrapping key wrapped with the - // public_key using RSAES-OAEP with SHA-1, - // MGF1 with SHA-1, and an empty label. - // 2. The key to be imported, wrapped with the ephemeral AES-256 key - // using AES-KWP (RFC 5649).
    - // - // If importing symmetric key material, it is expected that the - // unwrapped - // key contains plain bytes. If importing asymmetric key material, it - // is - // expected that the unwrapped key is in PKCS#8-encoded DER format - // (the - // PrivateKeyInfo structure from RFC 5208). - // - // This format is the same as the format produced by PKCS#11 - // mechanism - // CKM_RSA_AES_KEY_WRAP. - RsaAesWrappedKey string `json:"rsaAesWrappedKey,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Algorithm") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Algorithm") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ImportCryptoKeyVersionRequest) MarshalJSON() ([]byte, error) { - type NoMethod ImportCryptoKeyVersionRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ImportJob: An ImportJob can be used to create CryptoKeys -// and -// CryptoKeyVersions using pre-existing key material, -// generated outside of Cloud KMS. -// -// When an ImportJob is created, Cloud KMS will generate a "wrapping -// key", -// which is a public/private key pair. You use the wrapping key to -// encrypt (also -// known as wrap) the pre-existing key material to protect it during the -// import -// process. The nature of the wrapping key depends on the choice -// of -// import_method. When the wrapping key generation -// is complete, the state will be set to -// ACTIVE and the public_key -// can be fetched. The fetched public key can then be used to wrap -// your -// pre-existing key material. -// -// Once the key material is wrapped, it can be imported into a -// new -// CryptoKeyVersion in an existing CryptoKey by -// calling -// ImportCryptoKeyVersion. -// Multiple CryptoKeyVersions can be imported with a single -// ImportJob. Cloud KMS uses the private key portion of the wrapping key -// to -// unwrap the key material. Only Cloud KMS has access to the private -// key. -// -// An ImportJob expires 3 days after it is created. Once expired, Cloud -// KMS -// will no longer be able to import or unwrap any key material that was -// wrapped -// with the ImportJob's public key. -// -// For more information, see -// [Importing a key](https://cloud.google.com/kms/docs/importing-a-key). -type ImportJob struct { - // Attestation: Output only. Statement that was generated and signed by - // the key creator - // (for example, an HSM) at key creation time. Use this statement to - // verify - // attributes of the key as stored on the HSM, independently of - // Google. - // Only present if the chosen ImportMethod is one with a - // protection - // level of HSM. - Attestation *KeyOperationAttestation `json:"attestation,omitempty"` - - // CreateTime: Output only. The time at which this ImportJob was - // created. 
- CreateTime string `json:"createTime,omitempty"` - - // ExpireEventTime: Output only. The time this ImportJob expired. Only - // present if - // state is EXPIRED. - ExpireEventTime string `json:"expireEventTime,omitempty"` - - // ExpireTime: Output only. The time at which this ImportJob is - // scheduled for - // expiration and can no longer be used to import key material. - ExpireTime string `json:"expireTime,omitempty"` - - // GenerateTime: Output only. The time this ImportJob's key material was - // generated. - GenerateTime string `json:"generateTime,omitempty"` - - // ImportMethod: Required. Immutable. The wrapping method to be used for - // incoming key material. - // - // Possible values: - // "IMPORT_METHOD_UNSPECIFIED" - Not specified. - // "RSA_OAEP_3072_SHA1_AES_256" - This ImportMethod represents the - // CKM_RSA_AES_KEY_WRAP key wrapping - // scheme defined in the PKCS #11 standard. In summary, this - // involves - // wrapping the raw key with an ephemeral AES key, and wrapping - // the - // ephemeral AES key with a 3072 bit RSA key. For more details, see - // [RSA AES key - // wrap - // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/co - // s01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). - // "RSA_OAEP_4096_SHA1_AES_256" - This ImportMethod represents the - // CKM_RSA_AES_KEY_WRAP key wrapping - // scheme defined in the PKCS #11 standard. In summary, this - // involves - // wrapping the raw key with an ephemeral AES key, and wrapping - // the - // ephemeral AES key with a 4096 bit RSA key. For more details, see - // [RSA AES key - // wrap - // mechanism](http://docs.oasis-open.org/pkcs11/pkcs11-curr/v2.40/co - // s01/pkcs11-curr-v2.40-cos01.html#_Toc408226908). - ImportMethod string `json:"importMethod,omitempty"` - - // Name: Output only. The resource name for this ImportJob in the - // format - // `projects/*/locations/*/keyRings/*/importJobs/*`. - Name string `json:"name,omitempty"` - - // ProtectionLevel: Required. Immutable. The protection level of the - // ImportJob. This must match the - // protection_level of the - // version_template on the CryptoKey you - // attempt to import into. - // - // Possible values: - // "PROTECTION_LEVEL_UNSPECIFIED" - Not specified. - // "SOFTWARE" - Crypto operations are performed in software. - // "HSM" - Crypto operations are performed in a Hardware Security - // Module. - ProtectionLevel string `json:"protectionLevel,omitempty"` - - // PublicKey: Output only. The public key with which to wrap key - // material prior to - // import. Only returned if state is - // ACTIVE. - PublicKey *WrappingPublicKey `json:"publicKey,omitempty"` - - // State: Output only. The current state of the ImportJob, indicating if - // it can - // be used. - // - // Possible values: - // "IMPORT_JOB_STATE_UNSPECIFIED" - Not specified. - // "PENDING_GENERATION" - The wrapping key for this job is still being - // generated. It may not be - // used. Cloud KMS will automatically mark this job as - // ACTIVE as soon as the wrapping key is generated. - // "ACTIVE" - This job may be used in - // CreateCryptoKey and - // CreateCryptoKeyVersion - // requests. - // "EXPIRED" - This job can no longer be used and may not leave this - // state once entered. - State string `json:"state,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Attestation") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Attestation") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ImportJob) MarshalJSON() ([]byte, error) { - type NoMethod ImportJob - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// KeyOperationAttestation: Contains an HSM-generated attestation about -// a key operation. For more -// information, see [Verifying -// attestations] -// (https://cloud.google.com/kms/docs/attest-key). -type KeyOperationAttestation struct { - // Content: Output only. The attestation data provided by the HSM when - // the key - // operation was performed. - Content string `json:"content,omitempty"` - - // Format: Output only. The format of the attestation data. - // - // Possible values: - // "ATTESTATION_FORMAT_UNSPECIFIED" - Not specified. - // "CAVIUM_V1_COMPRESSED" - Cavium HSM attestation compressed with - // gzip. Note that this format is - // defined by Cavium and subject to change at any time. - // "CAVIUM_V2_COMPRESSED" - Cavium HSM attestation V2 compressed with - // gzip. This is a new format - // introduced in Cavium's version 3.2-08. - Format string `json:"format,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Content") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Content") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *KeyOperationAttestation) MarshalJSON() ([]byte, error) { - type NoMethod KeyOperationAttestation - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// KeyRing: A KeyRing is a toplevel logical grouping of CryptoKeys. -type KeyRing struct { - // CreateTime: Output only. The time at which this KeyRing was created. - CreateTime string `json:"createTime,omitempty"` - - // Name: Output only. The resource name for the KeyRing in the - // format - // `projects/*/locations/*/keyRings/*`. - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreateTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *KeyRing) MarshalJSON() ([]byte, error) { - type NoMethod KeyRing - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListCryptoKeyVersionsResponse: Response message for -// KeyManagementService.ListCryptoKeyVersions. -type ListCryptoKeyVersionsResponse struct { - // CryptoKeyVersions: The list of CryptoKeyVersions. - CryptoKeyVersions []*CryptoKeyVersion `json:"cryptoKeyVersions,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeyVersionsRequest.page_token to retrieve the next page - // of - // results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of CryptoKeyVersions that matched - // the - // query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeyVersions") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeyVersions") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ListCryptoKeyVersionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListCryptoKeyVersionsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListCryptoKeysResponse: Response message for -// KeyManagementService.ListCryptoKeys. -type ListCryptoKeysResponse struct { - // CryptoKeys: The list of CryptoKeys. - CryptoKeys []*CryptoKey `json:"cryptoKeys,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeysRequest.page_token to retrieve the next page of - // results. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of CryptoKeys that matched the query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeys") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeys") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListCryptoKeysResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListCryptoKeysResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListImportJobsResponse: Response message for -// KeyManagementService.ListImportJobs. -type ListImportJobsResponse struct { - // ImportJobs: The list of ImportJobs. - ImportJobs []*ImportJob `json:"importJobs,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListImportJobsRequest.page_token to retrieve the next page of - // results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of ImportJobs that matched the query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ImportJobs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ImportJobs") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListImportJobsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListImportJobsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListKeyRingsResponse: Response message for -// KeyManagementService.ListKeyRings. -type ListKeyRingsResponse struct { - // KeyRings: The list of KeyRings. - KeyRings []*KeyRing `json:"keyRings,omitempty"` - - // NextPageToken: A token to retrieve next page of results. 
Pass this - // value in - // ListKeyRingsRequest.page_token to retrieve the next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of KeyRings that matched the query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "KeyRings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "KeyRings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListKeyRingsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListKeyRingsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLocationsResponse: The response message for -// Locations.ListLocations. -type ListLocationsResponse struct { - // Locations: A list of locations that matches the specified filter in - // the request. - Locations []*Location `json:"locations,omitempty"` - - // NextPageToken: The standard List next-page token. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Locations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Locations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { - type NoMethod ListLocationsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Location: A resource that represents Google Cloud Platform location. -type Location struct { - // DisplayName: The friendly name for this location, typically a nearby - // city name. - // For example, "Tokyo". - DisplayName string `json:"displayName,omitempty"` - - // Labels: Cross-service attributes for the location. 
For example - // - // {"cloud.googleapis.com/region": "us-east1"} - Labels map[string]string `json:"labels,omitempty"` - - // LocationId: The canonical id for this location. For example: - // "us-east1". - LocationId string `json:"locationId,omitempty"` - - // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. - Metadata googleapi.RawMessage `json:"metadata,omitempty"` - - // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "DisplayName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "DisplayName") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Location) MarshalJSON() ([]byte, error) { - type NoMethod Location - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LocationMetadata: Cloud KMS metadata for the given -// google.cloud.location.Location. -type LocationMetadata struct { - // HsmAvailable: Indicates whether CryptoKeys with - // protection_level - // HSM can be created in this location. - HsmAvailable bool `json:"hsmAvailable,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HsmAvailable") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HsmAvailable") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LocationMetadata) MarshalJSON() ([]byte, error) { - type NoMethod LocationMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to -// specify access control policies for Cloud Platform resources. -// -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or -// more -// `members` to a single `role`. 
Members can be user accounts, service -// accounts, -// Google groups, and domains (such as G Suite). A `role` is a named -// list of -// permissions (defined by IAM or configured by users). A `binding` -// can -// optionally specify a `condition`, which is a logic expression that -// further -// constrains the role binding based on attributes about the request -// and/or -// target resource. -// -// **JSON Example** -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": ["user:eve@example.com"], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ] -// } -// -// **YAML Example** -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < -// timestamp('2020-10-01T00:00:00.000Z') -// -// For a description of IAM and its features, see the -// [IAM developer's guide](https://cloud.google.com/iam/docs). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. Optionally may - // specify a - // `condition` that determines when binding is in effect. - // `bindings` with no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing - // policy is overwritten. Due to blind-set semantics of an etag-less - // policy, - // 'setIamPolicy' will not fail even if either of incoming or stored - // policy - // does not meet the version requirements. - Etag string `json:"etag,omitempty"` - - // Version: Specifies the format of the policy. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value - // will be - // rejected. - // - // Operations affecting conditional bindings must specify version 3. - // This can - // be either setting a conditional policy, modifying a conditional - // binding, - // or removing a conditional binding from the stored conditional - // policy. - // Operations on non-conditional policies may specify any valid value - // or - // leave the field unset. 
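
The etag discussion above amounts to a read-modify-write loop: fetch the policy, edit it locally, and send it back with the etag you received so a concurrent update is not silently overwritten. A minimal sketch against this legacy cloudkms/v1 surface (the `svc.Projects.Locations.KeyRings` accessor chain and the `SetIamPolicy` call are not part of this hunk and are assumed from the generated client's usual shape; resource names, role, and member are placeholders):

```
package main

import (
	"context"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudkms.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	resource := "projects/my-project/locations/global/keyRings/my-ring"

	// Read: the returned Policy carries the etag for this version.
	policy, err := svc.Projects.Locations.KeyRings.GetIamPolicy(resource).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Modify locally.
	policy.Bindings = append(policy.Bindings, &cloudkms.Binding{
		Role:    "roles/cloudkms.viewer",
		Members: []string{"user:someone@example.com"},
	})

	// Write back with the etag still set, so the server can reject the
	// update if someone else changed the policy in the meantime.
	_, err = svc.Projects.Locations.KeyRings.SetIamPolicy(resource,
		&cloudkms.SetIamPolicyRequest{Policy: policy}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
}
```
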
- // - // If no etag is provided in the call to `setIamPolicy`, any - // version - // compliance checks on the incoming and/or stored policy is skipped. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type NoMethod Policy - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// PublicKey: The public key for a given CryptoKeyVersion. Obtained -// via -// GetPublicKey. -type PublicKey struct { - // Algorithm: The Algorithm associated - // with this key. - // - // Possible values: - // "CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED" - Not specified. - // "GOOGLE_SYMMETRIC_ENCRYPTION" - Creates symmetric encryption keys. - // "RSA_SIGN_PSS_2048_SHA256" - RSASSA-PSS 2048 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_3072_SHA256" - RSASSA-PSS 3072 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA256" - RSASSA-PSS 4096 bit key with a SHA256 - // digest. - // "RSA_SIGN_PSS_4096_SHA512" - RSASSA-PSS 4096 bit key with a SHA512 - // digest. - // "RSA_SIGN_PKCS1_2048_SHA256" - RSASSA-PKCS1-v1_5 with a 2048 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_3072_SHA256" - RSASSA-PKCS1-v1_5 with a 3072 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA256" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA256 digest. - // "RSA_SIGN_PKCS1_4096_SHA512" - RSASSA-PKCS1-v1_5 with a 4096 bit - // key and a SHA512 digest. - // "RSA_DECRYPT_OAEP_2048_SHA256" - RSAES-OAEP 2048 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_3072_SHA256" - RSAES-OAEP 3072 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA256" - RSAES-OAEP 4096 bit key with a - // SHA256 digest. - // "RSA_DECRYPT_OAEP_4096_SHA512" - RSAES-OAEP 4096 bit key with a - // SHA512 digest. - // "EC_SIGN_P256_SHA256" - ECDSA on the NIST P-256 curve with a SHA256 - // digest. - // "EC_SIGN_P384_SHA384" - ECDSA on the NIST P-384 curve with a SHA384 - // digest. - Algorithm string `json:"algorithm,omitempty"` - - // Pem: The public key, encoded in PEM format. For more information, see - // the - // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for - // [General - // Considerations](https://tools.ietf.org/html/rfc7468#section-2) - // and - // [Textual Encoding of Subject Public Key - // Info] - // (https://tools.ietf.org/html/rfc7468#section-13). 
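
Both PublicKey and WrappingPublicKey return their key material as an RFC 7468 PEM-encoded SubjectPublicKeyInfo, which the standard library can decode directly. A small sketch (the helper name and the placeholder input are made up for illustration):

```
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
)

// decodePublicKeyPEM turns the Pem field of a PublicKey or WrappingPublicKey
// response into a parsed key (e.g. *rsa.PublicKey or *ecdsa.PublicKey).
func decodePublicKeyPEM(pemData string) (interface{}, error) {
	block, _ := pem.Decode([]byte(pemData))
	if block == nil {
		return nil, fmt.Errorf("no PEM block found in response")
	}
	// RFC 7468 "Textual Encoding of Subject Public Key Info" wraps a
	// PKIX/DER-encoded key, which ParsePKIXPublicKey understands.
	return x509.ParsePKIXPublicKey(block.Bytes)
}

func main() {
	// Placeholder input; in practice this comes from PublicKey.Pem or
	// WrappingPublicKey.Pem, so decoding it here will report an error.
	pemFromAPI := "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n"
	key, err := decodePublicKeyPEM(pemFromAPI)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %T\n", key)
}
```
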
- Pem string `json:"pem,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Algorithm") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Algorithm") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *PublicKey) MarshalJSON() ([]byte, error) { - type NoMethod PublicKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RestoreCryptoKeyVersionRequest: Request message for -// KeyManagementService.RestoreCryptoKeyVersion. -type RestoreCryptoKeyVersionRequest struct { -} - -// SetIamPolicyRequest: Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. - Policy *Policy `json:"policy,omitempty"` - - // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // paths: "bindings, etag" - // This field is only used by Cloud IAM. - UpdateMask string `json:"updateMask,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Policy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Policy") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type NoMethod SetIamPolicyRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsRequest: Request message for `TestIamPermissions` -// method. -type TestIamPermissionsRequest struct { - // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. 
For - // more - // information see - // [IAM - // Overview](https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type NoMethod TestIamPermissionsRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsResponse: Response message for `TestIamPermissions` -// method. -type TestIamPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type NoMethod TestIamPermissionsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UpdateCryptoKeyPrimaryVersionRequest: Request message for -// KeyManagementService.UpdateCryptoKeyPrimaryVersion. -type UpdateCryptoKeyPrimaryVersionRequest struct { - // CryptoKeyVersionId: Required. The id of the child CryptoKeyVersion to - // use as primary. - CryptoKeyVersionId string `json:"cryptoKeyVersionId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeyVersionId") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
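
TestIamPermissions is the usual way to probe access without attempting the operation: send the concrete permissions you care about (wildcards are rejected, per the field doc above) and the response echoes back the subset the caller actually holds. A rough sketch against the legacy surface (the `TestIamPermissions` call method itself is not in this hunk and is assumed; permission strings and the resource name are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudkms.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	resource := "projects/my-project/locations/global/keyRings/my-ring"
	req := &cloudkms.TestIamPermissionsRequest{
		// List concrete permissions only; wildcards such as "cloudkms.*"
		// are not allowed.
		Permissions: []string{"cloudkms.cryptoKeys.create", "cloudkms.cryptoKeys.list"},
	}

	resp, err := svc.Projects.Locations.KeyRings.TestIamPermissions(resource, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// resp.Permissions is the subset of req.Permissions the caller holds.
	fmt.Println(resp.Permissions)
}
```
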
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeyVersionId") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { - type NoMethod UpdateCryptoKeyPrimaryVersionRequest - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// WrappingPublicKey: The public key component of the wrapping key. For -// details of the type of -// key this public key corresponds to, see the ImportMethod. -type WrappingPublicKey struct { - // Pem: The public key, encoded in PEM format. For more information, see - // the [RFC - // 7468](https://tools.ietf.org/html/rfc7468) sections for - // [General - // Considerations](https://tools.ietf.org/html/rfc7468#section-2 - // ) and - // [Textual Encoding of Subject Public Key - // Info] - // (https://tools.ietf.org/html/rfc7468#section-13). - Pem string `json:"pem,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Pem") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Pem") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *WrappingPublicKey) MarshalJSON() ([]byte, error) { - type NoMethod WrappingPublicKey - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "cloudkms.projects.locations.get": - -type ProjectsLocationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Gets information about a location. -func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { - c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.get" call. -// Exactly one of *Location or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Location.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Location{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets information about a location.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Resource name for the location.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "Location" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.list": - -type ProjectsLocationsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists information about the supported locations for this -// service. -func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall { - c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": The standard list -// filter. -func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": The standard list -// page size. -func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The standard list -// page token. -func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
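
The IfNoneMatch / IsNotModified pairing documented above supports a simple conditional-GET pattern: remember the ETag from a previous response, pass it back, and treat a 304 as "nothing changed". A small sketch (whether a given endpoint actually returns an ETag header is up to the service; names are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := cloudkms.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	name := "projects/my-project/locations/us-east1"

	// First fetch: the embedded ServerResponse exposes the response headers.
	loc, err := svc.Projects.Locations.Get(name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := loc.Header.Get("Etag")

	// Conditional re-fetch: a 304 comes back as an error that
	// googleapi.IsNotModified recognizes.
	_, err = svc.Projects.Locations.Get(name).IfNoneMatch(etag).Context(ctx).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("location unchanged since the last fetch")
	case err != nil:
		log.Fatal(err)
	}
}
```
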
-func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.list" call. -// Exactly one of *ListLocationsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLocationsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLocationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists information about the supported locations for this service.", - // "flatPath": "v1/projects/{projectsId}/locations", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "The standard list filter.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The resource that owns the locations collection, if applicable.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "The standard list page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The standard list page token.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}/locations", - // "response": { - // "$ref": "ListLocationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.create": - -type ProjectsLocationsKeyRingsCreateCall struct { - s *Service - parent string - keyring *KeyRing - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new KeyRing in a given Project and Location. -func (r *ProjectsLocationsKeyRingsService) Create(parent string, keyring *KeyRing) *ProjectsLocationsKeyRingsCreateCall { - c := &ProjectsLocationsKeyRingsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.keyring = keyring - return c -} - -// KeyRingId sets the optional parameter "keyRingId": Required. It must -// be unique within a location and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` -func (c *ProjectsLocationsKeyRingsCreateCall) KeyRingId(keyRingId string) *ProjectsLocationsKeyRingsCreateCall { - c.urlParams_.Set("keyRingId", keyRingId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
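
The Pages helper above wraps the NextPageToken loop: it calls Do repeatedly, hands each ListLocationsResponse to the callback, and stops when the token comes back empty or the callback returns an error. Minimal usage sketch (the project name is a placeholder and the `svc.Projects.Locations` accessor chain is assumed from the generated client's usual shape):

```
package main

import (
	"context"
	"fmt"
	"log"

	cloudkms "google.golang.org/api/cloudkms/v1"
)

func main() {
	ctx := context.Background()
	svc, err := cloudkms.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Pages drives the NextPageToken loop and invokes the callback once
	// per ListLocationsResponse page.
	err = svc.Projects.Locations.List("projects/my-project").
		PageSize(100).
		Pages(ctx, func(page *cloudkms.ListLocationsResponse) error {
			for _, loc := range page.Locations {
				fmt.Println(loc.LocationId, loc.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}
```
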
-func (c *ProjectsLocationsKeyRingsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.keyring) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keyRings") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.create" call. -// Exactly one of *KeyRing or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *KeyRing.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsCreateCall) Do(opts ...googleapi.CallOption) (*KeyRing, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &KeyRing{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new KeyRing in a given Project and Location.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "keyRingId": { - // "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. 
The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/keyRings", - // "request": { - // "$ref": "KeyRing" - // }, - // "response": { - // "$ref": "KeyRing" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.get": - -type ProjectsLocationsKeyRingsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given KeyRing. -func (r *ProjectsLocationsKeyRingsService) Get(name string) *ProjectsLocationsKeyRingsGetCall { - c := &ProjectsLocationsKeyRingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.get" call. -// Exactly one of *KeyRing or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *KeyRing.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsGetCall) Do(opts ...googleapi.CallOption) (*KeyRing, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &KeyRing{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given KeyRing.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The name of the KeyRing to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "KeyRing" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.getIamPolicy": - -type ProjectsLocationsKeyRingsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsLocationsKeyRingsService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c := &ProjectsLocationsKeyRingsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "options.requestedPolicyVersion": { - // "description": "Optional. 
The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.list": - -type ProjectsLocationsKeyRingsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists KeyRings. -func (r *ProjectsLocationsKeyRingsService) List(parent string) *ProjectsLocationsKeyRingsListCall { - c := &ProjectsLocationsKeyRingsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsListCall) Filter(filter string) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of KeyRings to include in the -// response. Further KeyRings can subsequently be obtained by -// including the ListKeyRingsResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. -func (c *ProjectsLocationsKeyRingsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListKeyRingsResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keyRings") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.list" call. -// Exactly one of *ListKeyRingsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListKeyRingsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsListCall) Do(opts ...googleapi.CallOption) (*ListKeyRingsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListKeyRingsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists KeyRings.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Optional. Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/keyRings", - // "response": { - // "$ref": "ListKeyRingsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsKeyRingsListCall) Pages(ctx context.Context, f func(*ListKeyRingsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.setIamPolicy": - -type ProjectsLocationsKeyRingsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsLocationsKeyRingsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c := &ProjectsLocationsKeyRingsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.testIamPermissions": - -type ProjectsLocationsKeyRingsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsLocationsKeyRingsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c := &ProjectsLocationsKeyRingsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.create": - -type ProjectsLocationsKeyRingsCryptoKeysCreateCall struct { - s *Service - parent string - cryptokey *CryptoKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new CryptoKey within a KeyRing. -// -// CryptoKey.purpose and -// CryptoKey.version_template.algorithm -// are required. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Create(parent string, cryptokey *CryptoKey) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.cryptokey = cryptokey - return c -} - -// CryptoKeyId sets the optional parameter "cryptoKeyId": Required. It -// must be unique within a KeyRing and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) CryptoKeyId(cryptoKeyId string) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.urlParams_.Set("cryptoKeyId", cryptoKeyId) - return c -} - -// SkipInitialVersionCreation sets the optional parameter -// "skipInitialVersionCreation": If set to true, the request will create -// a CryptoKey without any -// CryptoKeyVersions. You must manually call -// CreateCryptoKeyVersion or -// ImportCryptoKeyVersion -// before you can use this CryptoKey. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) SkipInitialVersionCreation(skipInitialVersionCreation bool) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.urlParams_.Set("skipInitialVersionCreation", fmt.Sprint(skipInitialVersionCreation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeys") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.create" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose and\nCryptoKey.version_template.algorithm\nare required.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "cryptoKeyId": { - // "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The name of the KeyRing associated with the\nCryptoKeys.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "skipInitialVersionCreation": { - // "description": "If set to true, the request will create a CryptoKey without any\nCryptoKeyVersions. 
You must manually call\nCreateCryptoKeyVersion or\nImportCryptoKeyVersion\nbefore you can use this CryptoKey.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "v1/{+parent}/cryptoKeys", - // "request": { - // "$ref": "CryptoKey" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt": - -type ProjectsLocationsKeyRingsCryptoKeysDecryptCall struct { - s *Service - name string - decryptrequest *DecryptRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Decrypt: Decrypts data that was protected by Encrypt. The -// CryptoKey.purpose -// must be ENCRYPT_DECRYPT. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Decrypt(name string, decryptrequest *DecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c := &ProjectsLocationsKeyRingsCryptoKeysDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.decryptrequest = decryptrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.decryptrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:decrypt") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt" call. -// Exactly one of *DecryptResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DecryptResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Do(opts ...googleapi.CallOption) (*DecryptResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DecryptResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Decrypts data that was protected by Encrypt. The CryptoKey.purpose\nmust be ENCRYPT_DECRYPT.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:decrypt", - // "request": { - // "$ref": "DecryptRequest" - // }, - // "response": { - // "$ref": "DecryptResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt": - -type ProjectsLocationsKeyRingsCryptoKeysEncryptCall struct { - s *Service - name string - encryptrequest *EncryptRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Encrypt: Encrypts data, so that it can only be recovered by a call to -// Decrypt. -// The CryptoKey.purpose must be -// ENCRYPT_DECRYPT. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Encrypt(name string, encryptrequest *EncryptRequest) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c := &ProjectsLocationsKeyRingsCryptoKeysEncryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.encryptrequest = encryptrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.encryptrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:encrypt") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt" call. -// Exactly one of *EncryptResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *EncryptResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Do(opts ...googleapi.CallOption) (*EncryptResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &EncryptResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.\nThe CryptoKey.purpose must be\nENCRYPT_DECRYPT.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:encrypt", - // "request": { - // "$ref": "EncryptRequest" - // }, - // "response": { - // "$ref": "EncryptResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.get": - -type ProjectsLocationsKeyRingsCryptoKeysGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given CryptoKey, as well as its -// primary CryptoKeyVersion. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Get(name string) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c := &ProjectsLocationsKeyRingsCryptoKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.get" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The name of the CryptoKey to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy": - -type ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "options.requestedPolicyVersion": { - // "description": "Optional. The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.list": - -type ProjectsLocationsKeyRingsCryptoKeysListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists CryptoKeys. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) List(parent string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c := &ProjectsLocationsKeyRingsCryptoKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Filter(filter string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . 
-func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeys to include in the -// response. Further CryptoKeys can subsequently be obtained -// by -// including the ListCryptoKeysResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListCryptoKeysResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// VersionView sets the optional parameter "versionView": The fields of -// the primary version to include in the response. -// -// Possible values: -// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" -// "FULL" -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) VersionView(versionView string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("versionView", versionView) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeys") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.list" call. -// Exactly one of *ListCryptoKeysResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListCryptoKeysResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Do(opts ...googleapi.CallOption) (*ListCryptoKeysResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCryptoKeysResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists CryptoKeys.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Optional. Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. 
The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "versionView": { - // "description": "The fields of the primary version to include in the response.", - // "enum": [ - // "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", - // "FULL" - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeys", - // "response": { - // "$ref": "ListCryptoKeysResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Pages(ctx context.Context, f func(*ListCryptoKeysResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.patch": - -type ProjectsLocationsKeyRingsCryptoKeysPatchCall struct { - s *Service - name string - cryptokey *CryptoKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Update a CryptoKey. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Patch(name string, cryptokey *CryptoKey) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c := &ProjectsLocationsKeyRingsCryptoKeysPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.cryptokey = cryptokey - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. List -// of fields to be updated in this request. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) UpdateMask(updateMask string) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.patch" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update a CryptoKey.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - // "httpMethod": "PATCH", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "updateMask": { - // "description": "Required. 
List of fields to be updated in this request.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "CryptoKey" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy": - -type ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions": - -type ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c := &ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion": - -type ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall struct { - s *Service - name string - updatecryptokeyprimaryversionrequest *UpdateCryptoKeyPrimaryVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// UpdatePrimaryVersion: Update the version of a CryptoKey that will be -// used in Encrypt. -// -// Returns an error if called on an asymmetric key. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) UpdatePrimaryVersion(name string, updatecryptokeyprimaryversionrequest *UpdateCryptoKeyPrimaryVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c := &ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.updatecryptokeyprimaryversionrequest = updatecryptokeyprimaryversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatecryptokeyprimaryversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:updatePrimaryVersion") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update the version of a CryptoKey that will be used in Encrypt.\n\nReturns an error if called on an asymmetric key.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The resource name of the CryptoKey to update.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:updatePrimaryVersion", - // "request": { - // "$ref": "UpdateCryptoKeyPrimaryVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall struct { - s *Service - name string - asymmetricdecryptrequest *AsymmetricDecryptRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AsymmetricDecrypt: Decrypts data that was encrypted with a public key -// retrieved from -// GetPublicKey corresponding to a CryptoKeyVersion -// with -// CryptoKey.purpose ASYMMETRIC_DECRYPT. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) AsymmetricDecrypt(name string, asymmetricdecryptrequest *AsymmetricDecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.asymmetricdecryptrequest = asymmetricdecryptrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.asymmetricdecryptrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:asymmetricDecrypt") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt" call. -// Exactly one of *AsymmetricDecryptResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AsymmetricDecryptResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricDecryptCall) Do(opts ...googleapi.CallOption) (*AsymmetricDecryptResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AsymmetricDecryptResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Decrypts data that was encrypted with a public key retrieved from\nGetPublicKey corresponding to a CryptoKeyVersion with\nCryptoKey.purpose ASYMMETRIC_DECRYPT.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricDecrypt", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricDecrypt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the CryptoKeyVersion to use for\ndecryption.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:asymmetricDecrypt", - // "request": { - // "$ref": "AsymmetricDecryptRequest" - // }, - // "response": { - // "$ref": "AsymmetricDecryptResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall struct { - s *Service - name string - asymmetricsignrequest *AsymmetricSignRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// AsymmetricSign: Signs data using a CryptoKeyVersion with -// CryptoKey.purpose -// ASYMMETRIC_SIGN, producing a signature that can be verified with the -// public -// key retrieved from GetPublicKey. 
-func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) AsymmetricSign(name string, asymmetricsignrequest *AsymmetricSignRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.asymmetricsignrequest = asymmetricsignrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.asymmetricsignrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:asymmetricSign") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign" call. -// Exactly one of *AsymmetricSignResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AsymmetricSignResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignCall) Do(opts ...googleapi.CallOption) (*AsymmetricSignResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &AsymmetricSignResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Signs data using a CryptoKeyVersion with CryptoKey.purpose\nASYMMETRIC_SIGN, producing a signature that can be verified with the public\nkey retrieved from GetPublicKey.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:asymmetricSign", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.asymmetricSign", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the CryptoKeyVersion to use for signing.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:asymmetricSign", - // "request": { - // "$ref": "AsymmetricSignRequest" - // }, - // "response": { - // "$ref": "AsymmetricSignResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall struct { - s *Service - parent string - cryptokeyversion *CryptoKeyVersion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new CryptoKeyVersion in a CryptoKey. -// -// The server will assign the next sequential id. If unset, -// state will be set to -// ENABLED. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Create(parent string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.cryptokeyversion = cryptokeyversion - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeyVersions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Required. 
The name of the CryptoKey associated with\nthe CryptoKeyVersions.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeyVersions", - // "request": { - // "$ref": "CryptoKeyVersion" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall struct { - s *Service - name string - destroycryptokeyversionrequest *DestroyCryptoKeyVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Destroy: Schedule a CryptoKeyVersion for destruction. -// -// Upon calling this method, CryptoKeyVersion.state will be set -// to -// DESTROY_SCHEDULED -// and destroy_time will be set to a time 24 -// hours in the future, at which point the state -// will be changed to -// DESTROYED, and the key -// material will be irrevocably destroyed. -// -// Before the destroy_time is reached, -// RestoreCryptoKeyVersion may be called to reverse the process. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Destroy(name string, destroycryptokeyversionrequest *DestroyCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.destroycryptokeyversionrequest = destroycryptokeyversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.destroycryptokeyversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:destroy") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the CryptoKeyVersion to destroy.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:destroy", - // "request": { - // "$ref": "DestroyCryptoKeyVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given CryptoKeyVersion. 
-func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Get(name string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given CryptoKeyVersion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The name of the CryptoKeyVersion to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetPublicKey: Returns the public key for the given CryptoKeyVersion. -// The -// CryptoKey.purpose must be -// ASYMMETRIC_SIGN or -// ASYMMETRIC_DECRYPT. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) GetPublicKey(name string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/publicKey") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey" call. -// Exactly one of *PublicKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *PublicKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyCall) Do(opts ...googleapi.CallOption) (*PublicKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &PublicKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the public key for the given CryptoKeyVersion. The\nCryptoKey.purpose must be\nASYMMETRIC_SIGN or\nASYMMETRIC_DECRYPT.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}/publicKey", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.getPublicKey", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The name of the CryptoKeyVersion public key to\nget.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}/publicKey", - // "response": { - // "$ref": "PublicKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall struct { - s *Service - parent string - importcryptokeyversionrequest *ImportCryptoKeyVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Import: Imports a new CryptoKeyVersion into an existing CryptoKey -// using the -// wrapped key material provided in the request. -// -// The version ID will be assigned the next sequential id within -// the -// CryptoKey. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Import(parent string, importcryptokeyversionrequest *ImportCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.importcryptokeyversionrequest = importcryptokeyversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.importcryptokeyversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeyVersions:import") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsImportCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Imports a new CryptoKeyVersion into an existing CryptoKey using the\nwrapped key material provided in the request.\n\nThe version ID will be assigned the next sequential id within the\nCryptoKey.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions:import", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.import", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Required. The name of the CryptoKey to\nbe imported into.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeyVersions:import", - // "request": { - // "$ref": "ImportCryptoKeyVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists CryptoKeyVersions. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) List(parent string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. 
For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Filter(filter string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeyVersions to -// include in the response. Further CryptoKeyVersions can -// subsequently be obtained by including -// the -// ListCryptoKeyVersionsResponse.next_page_token in a subsequent -// request. -// If unspecified, the server will pick an appropriate default. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListCryptoKeyVersionsResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// View sets the optional parameter "view": The fields to include in the -// response. -// -// Possible values: -// "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED" -// "FULL" -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) View(view string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("view", view) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeyVersions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list" call. -// Exactly one of *ListCryptoKeyVersionsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListCryptoKeyVersionsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Do(opts ...googleapi.CallOption) (*ListCryptoKeyVersionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCryptoKeyVersionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists CryptoKeyVersions.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. 
For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Optional. Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "view": { - // "description": "The fields to include in the response.", - // "enum": [ - // "CRYPTO_KEY_VERSION_VIEW_UNSPECIFIED", - // "FULL" - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeyVersions", - // "response": { - // "$ref": "ListCryptoKeyVersionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Pages(ctx context.Context, f func(*ListCryptoKeyVersionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall struct { - s *Service - name string - cryptokeyversion *CryptoKeyVersion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Update a CryptoKeyVersion's metadata. -// -// state may be changed between -// ENABLED and -// DISABLED using this -// method. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion -// to -// move between other states. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Patch(name string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.cryptokeyversion = cryptokeyversion - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required. List -// of fields to be updated in this request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - // "httpMethod": "PATCH", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "updateMask": { - // "description": "Required. List of fields to be updated in this request.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "CryptoKeyVersion" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall struct { - s *Service - name string - restorecryptokeyversionrequest *RestoreCryptoKeyVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Restore: Restore a CryptoKeyVersion in -// the -// DESTROY_SCHEDULED -// state. -// -// Upon restoration of the CryptoKeyVersion, state -// will be set to DISABLED, -// and destroy_time will be cleared. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Restore(name string, restorecryptokeyversionrequest *RestoreCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.restorecryptokeyversionrequest = restorecryptokeyversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.restorecryptokeyversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:restore") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The resource name of the CryptoKeyVersion to restore.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:restore", - // "request": { - // "$ref": "RestoreCryptoKeyVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.create": - -type ProjectsLocationsKeyRingsImportJobsCreateCall struct { - s *Service - parent string - importjob *ImportJob - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new ImportJob within a -// KeyRing. -// -// ImportJob.import_method is required. -func (r *ProjectsLocationsKeyRingsImportJobsService) Create(parent string, importjob *ImportJob) *ProjectsLocationsKeyRingsImportJobsCreateCall { - c := &ProjectsLocationsKeyRingsImportJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.importjob = importjob - return c -} - -// ImportJobId sets the optional parameter "importJobId": Required. It -// must be unique within a KeyRing and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) ImportJobId(importJobId string) *ProjectsLocationsKeyRingsImportJobsCreateCall { - c.urlParams_.Set("importJobId", importJobId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.importjob) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/importJobs") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.create" call. -// Exactly one of *ImportJob or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImportJob.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsImportJobsCreateCall) Do(opts ...googleapi.CallOption) (*ImportJob, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ImportJob{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new ImportJob within a KeyRing.\n\nImportJob.import_method is required.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.importJobs.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "importJobId": { - // "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The name of the KeyRing associated with the\nImportJobs.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/importJobs", - // "request": { - // "$ref": "ImportJob" - // }, - // "response": { - // "$ref": "ImportJob" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.get": - -type ProjectsLocationsKeyRingsImportJobsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given ImportJob. -func (r *ProjectsLocationsKeyRingsImportJobsService) Get(name string) *ProjectsLocationsKeyRingsImportJobsGetCall { - c := &ProjectsLocationsKeyRingsImportJobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsImportJobsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsImportJobsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsImportJobsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.get" call. -// Exactly one of *ImportJob or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImportJob.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsImportJobsGetCall) Do(opts ...googleapi.CallOption) (*ImportJob, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ImportJob{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given ImportJob.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.importJobs.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The name of the ImportJob to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "ImportJob" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy": - -type ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsLocationsKeyRingsImportJobsService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { - c := &ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "options.requestedPolicyVersion": The policy format version to be -// returned. -// -// Valid values are 0, 1, and 3. Requests specifying an invalid value -// will be -// rejected. -// -// Requests for policies with any conditional bindings must specify -// version 3. -// Policies without any conditional bindings may specify any valid value -// or -// leave the field unset. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { - c.urlParams_.Set("options.requestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsImportJobsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.importJobs.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "options.requestedPolicyVersion": { - // "description": "Optional. 
The policy format version to be returned.\n\nValid values are 0, 1, and 3. Requests specifying an invalid value will be\nrejected.\n\nRequests for policies with any conditional bindings must specify version 3.\nPolicies without any conditional bindings may specify any valid value or\nleave the field unset.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.list": - -type ProjectsLocationsKeyRingsImportJobsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists ImportJobs. -func (r *ProjectsLocationsKeyRingsImportJobsService) List(parent string) *ProjectsLocationsKeyRingsImportJobsListCall { - c := &ProjectsLocationsKeyRingsImportJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// Filter sets the optional parameter "filter": Only include resources -// that match the filter in the response. For -// more information, see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsImportJobsListCall) Filter(filter string) *ProjectsLocationsKeyRingsImportJobsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": Specify how the -// results should be sorted. If not specified, the -// results will be sorted in the default order. For more information, -// see -// [Sorting and filtering -// list -// results](https://cloud.google.com/kms/docs/sorting-and-filtering) -// . -func (c *ProjectsLocationsKeyRingsImportJobsListCall) OrderBy(orderBy string) *ProjectsLocationsKeyRingsImportJobsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of ImportJobs to include in the -// response. Further ImportJobs can subsequently be obtained -// by -// including the ListImportJobsResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate default. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsImportJobsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListImportJobsResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsImportJobsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsImportJobsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsImportJobsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/importJobs") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.list" call. -// Exactly one of *ListImportJobsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListImportJobsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsImportJobsListCall) Do(opts ...googleapi.CallOption) (*ListImportJobsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListImportJobsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists ImportJobs.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.importJobs.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "filter": { - // "description": "Optional. Only include resources that match the filter in the response. For\nmore information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "orderBy": { - // "description": "Optional. Specify how the results should be sorted. If not specified, the\nresults will be sorted in the default order. For more information, see\n[Sorting and filtering list\nresults](https://cloud.google.com/kms/docs/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "Optional. Optional limit on the number of ImportJobs to include in the\nresponse. Further ImportJobs can subsequently be obtained by\nincluding the ListImportJobsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional. Optional pagination token, returned earlier via\nListImportJobsResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/importJobs", - // "response": { - // "$ref": "ListImportJobsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsLocationsKeyRingsImportJobsListCall) Pages(ctx context.Context, f func(*ListImportJobsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy": - -type ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsLocationsKeyRingsImportJobsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall { - c := &ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsImportJobsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.importJobs.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions": - -type ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsLocationsKeyRingsImportJobsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall { - c := &ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190926") - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsImportJobsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/importJobs/{importJobsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.importJobs.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/importJobs/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloudkms" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index ab5376762..b5e38c662 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -1,4 +1,4 @@ -// Copyright 2011 Google Inc. All rights reserved. +// Copyright 2011 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -11,12 +11,12 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" + "time" - "google.golang.org/api/googleapi/internal/uritemplates" + "google.golang.org/api/internal/third_party/uritemplates" ) // ContentTyper is an interface for Readers which know (or would like @@ -54,7 +54,7 @@ const ( // DefaultUploadChunkSize is the default chunk size to use for resumable // uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 + DefaultUploadChunkSize = 16 * 1024 * 1024 // MinUploadChunkSize is the minimum chunk size that can be used for // resumable uploads. All user-specified chunk sizes must be multiple of @@ -69,6 +69,8 @@ type Error struct { // Message is the server response message and is only populated when // explicitly referenced by the JSON server response. Message string `json:"message"` + // Details provide more context to an error. + Details []interface{} `json:"details"` // Body is the raw response returned by the server. // It is often but not always JSON, depending on how the request fails. Body string @@ -76,6 +78,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -95,6 +100,16 @@ func (e *Error) Error() string { if e.Message != "" { fmt.Fprintf(&buf, "%s", e.Message) } + if len(e.Details) > 0 { + var detailBuf bytes.Buffer + enc := json.NewEncoder(&detailBuf) + enc.SetIndent("", " ") + if err := enc.Encode(e.Details); err == nil { + fmt.Fprint(&buf, "\nDetails:") + fmt.Fprintf(&buf, "\n%s", detailBuf.String()) + + } + } if len(e.Errors) == 0 { return strings.TrimSpace(buf.String()) } @@ -109,6 +124,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. 
+func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -119,7 +143,7 @@ func CheckResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) @@ -128,6 +152,7 @@ func CheckResponse(res *http.Response) error { jerr.Error.Code = res.StatusCode } jerr.Error.Body = string(slurp) + jerr.Error.Header = res.Header return jerr.Error } } @@ -158,10 +183,11 @@ func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } @@ -233,12 +259,30 @@ func ChunkSize(size int) MediaOption { return chunkSizeOption(size) } +type chunkRetryDeadlineOption time.Duration + +func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { + o.ChunkRetryDeadline = time.Duration(cd) +} + +// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry +// deadline. If a single chunk has been attempting to upload for longer than +// this time and the request fails, it will no longer be retried, and the error +// will be returned to the caller. +// This is only applicable for files which are large enough to require +// a multi-chunk resumable upload. +// The default value is 32s. +// To set a deadline on the entire upload, use context timeout or cancellation. +func ChunkRetryDeadline(deadline time.Duration) MediaOption { + return chunkRetryDeadlineOption(deadline) +} + // MediaOptions stores options for customizing media upload. It is not used by developers directly. type MediaOptions struct { ContentType string ForceEmptyContentType bool - - ChunkSize int + ChunkSize int + ChunkRetryDeadline time.Duration } // ProcessMediaOptions stores options from opts in a MediaOptions. @@ -256,14 +300,22 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { // "http://www.golang.org/topics/myproject/mytopic". It strips all parent // references (e.g. ../..) as well as anything after the host // (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). +// +// ResolveRelative panics if either basestr or relstr is not able to be parsed. func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) + u, err := url.Parse(basestr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", basestr)) + } afterColonPath := "" if i := strings.IndexRune(relstr, ':'); i > 0 { afterColonPath = relstr[i+1:] relstr = relstr[:i] } - rel, _ := url.Parse(relstr) + rel, err := url.Parse(relstr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", relstr)) + } u = u.ResolveReference(rel) us := u.String() if afterColonPath != "" { @@ -331,7 +383,7 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { } // A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance // // Partial responses can dramatically reduce the amount of data that must be sent to your application. 
// In order to request partial responses, you can specify the full list of fields @@ -342,14 +394,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() -// -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ @@ -374,6 +423,14 @@ type CallOption interface { Get() (key, value string) } +// A MultiCallOption is an option argument to an API call and can be passed +// anywhere a CallOption is accepted. It additionally supports returning a slice +// of values for a given key. +type MultiCallOption interface { + CallOption + GetMulti() (key string, value []string) +} + // QuotaUser returns a CallOption that will set the quota user for a call. // The quota user can be used by server-side applications to control accounting. // It can be an arbitrary string up to 40 characters, and will override UserIP @@ -400,4 +457,24 @@ type traceTok string func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } +type queryParameter struct { + key string + values []string +} + +// QueryParameter allows setting the value(s) of an arbitrary key. +func QueryParameter(key string, values ...string) CallOption { + return queryParameter{key: key, values: append([]string{}, values...)} +} + +// Get will never actually be called -- GetMulti will. +func (q queryParameter) Get() (string, string) { + return "", "" +} + +// GetMulti returns the key and values values associated to that key. +func (q queryParameter) GetMulti() (string, []string) { + return q.key, q.values +} + // TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index eca1ea250..f5d826c2a 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -1,9 +1,13 @@ -// Copyright 2012 Google Inc. All rights reserved. +// Copyright 2012 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package transport contains HTTP transports used to make // authenticated API requests. +// +// This package is DEPRECATED. Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( @@ -13,6 +17,8 @@ import ( // APIKey is an HTTP Transport which wraps an underlying transport and // appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. type APIKey struct { // Key is the API Key to set on requests. 
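(Editorial aside, not part of the vendored diff.) The `googleapi` hunk above gives `Error` a structured `Details` field plus `Wrap`/`Unwrap` methods, and the generated `Do` methods throughout this patch keep directing callers to `googleapi.IsNotModified` for 304 handling. A minimal sketch of how calling code could inspect such errors, assuming a hypothetical `example` package:

```
// Editorial example only; not vendored code.
package example

import (
	"errors"
	"fmt"

	"google.golang.org/api/googleapi"
)

// inspectAPIError shows the error surface added in this patch: Details carries
// structured server detail, and Wrap/Unwrap make *googleapi.Error usable with
// errors.As / errors.Is.
func inspectAPIError(err error) {
	if googleapi.IsNotModified(err) {
		fmt.Println("HTTP 304: cached copy is still current")
		return
	}
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		fmt.Println("status:", gerr.Code)
		fmt.Println("message:", gerr.Message)
		for _, d := range gerr.Details {
			fmt.Printf("detail: %+v\n", d)
		}
	}
}
```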
Key string diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a280e3021..fabf74d50 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -1,4 +1,4 @@ -// Copyright 2013 Google Inc. All rights reserved. +// Copyright 2013 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json new file mode 100644 index 000000000..633d334df --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -0,0 +1,372 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." + } + } + } + }, + "basePath": "", + "baseUrl": "https://iamcredentials.googleapis.com/", + "batchPath": "batch", + "canonicalName": "IAM Credentials", + "description": "Creates short-lived credentials for impersonating IAM service accounts. Disabling this API also disables the IAM API (iam.googleapis.com). However, enabling this API doesn't enable the IAM API. ", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "iamcredentials:v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://iamcredentials.mtls.googleapis.com/", + "name": "iamcredentials", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "serviceAccounts": { + "methods": { + "generateAccessToken": { + "description": "Generates an OAuth 2.0 access token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateAccessToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateAccessToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateAccessToken", + "request": { + "$ref": "GenerateAccessTokenRequest" + }, + "response": { + "$ref": "GenerateAccessTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "generateIdToken": { + "description": "Generates an OpenID Connect ID token for a service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:generateIdToken", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.generateIdToken", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:generateIdToken", + "request": { + "$ref": "GenerateIdTokenRequest" + }, + "response": { + "$ref": "GenerateIdTokenResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signBlob": { + "description": "Signs a blob using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signBlob", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. 
The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signBlob", + "request": { + "$ref": "SignBlobRequest" + }, + "response": { + "$ref": "SignBlobResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "signJwt": { + "description": "Signs a JWT using a service account's system-managed private key.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + "httpMethod": "POST", + "id": "iamcredentials.projects.serviceAccounts.signJwt", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the service account for which the credentials are requested, in the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:signJwt", + "request": { + "$ref": "SignJwtRequest" + }, + "response": { + "$ref": "SignJwtResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20240227", + "rootUrl": "https://iamcredentials.googleapis.com/", + "schemas": { + "GenerateAccessTokenRequest": { + "id": "GenerateAccessTokenRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. This field is required for [delegated requests](https://cloud.google.com/iam/help/credentials/delegated-request). For [direct requests](https://cloud.google.com/iam/help/credentials/direct-request), which are more common, do not specify this field. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "lifetime": { + "description": "The desired lifetime duration of the access token in seconds. By default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 hours, you can add the service account as an allowed value in an Organization Policy that enforces the `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. See detailed instructions at https://cloud.google.com/iam/help/credentials/lifetime If a value is not specified, the token's lifetime will be set to a default value of 1 hour.", + "format": "google-duration", + "type": "string" + }, + "scope": { + "description": "Required. Code to identify the scopes to be included in the OAuth 2.0 access token. See https://developers.google.com/identity/protocols/googlescopes for more information. 
At least one value required.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GenerateAccessTokenResponse": { + "id": "GenerateAccessTokenResponse", + "properties": { + "accessToken": { + "description": "The OAuth 2.0 access token.", + "type": "string" + }, + "expireTime": { + "description": "Token expiration time. The expiration time is always set.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GenerateIdTokenRequest": { + "id": "GenerateIdTokenRequest", + "properties": { + "audience": { + "description": "Required. The audience for the token, such as the API or account that this token grants access to.", + "type": "string" + }, + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "includeEmail": { + "description": "Include the service account email in the token. If set to `true`, the token will contain `email` and `email_verified` claims.", + "type": "boolean" + } + }, + "type": "object" + }, + "GenerateIdTokenResponse": { + "id": "GenerateIdTokenResponse", + "properties": { + "token": { + "description": "The OpenId Connect ID token.", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobRequest": { + "id": "SignBlobRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The bytes to sign.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignBlobResponse": { + "id": "SignBlobResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the blob. The key used for signing will remain valid for at least 12 hours after the blob is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedBlob": { + "description": "The signature for the blob. Does not include the original blob. 
After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the blob. As a result, the receiver can no longer verify the signature.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtRequest": { + "id": "SignJwtRequest", + "properties": { + "delegates": { + "description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "payload": { + "description": "Required. The JWT payload to sign. Must be a serialized JSON object that contains a JWT Claims Set. For example: `{\"sub\": \"user@example.com\", \"iat\": 313435}` If the JWT Claims Set contains an expiration time (`exp`) claim, it must be an integer timestamp that is not in the past and no more than 12 hours in the future.", + "type": "string" + } + }, + "type": "object" + }, + "SignJwtResponse": { + "id": "SignJwtResponse", + "properties": { + "keyId": { + "description": "The ID of the key used to sign the JWT. The key used for signing will remain valid for at least 12 hours after the JWT is signed. To verify the signature, you can retrieve the public key in several formats from the following endpoints: - RSA public key wrapped in an X.509 v3 certificate: `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL}` - Raw key in JSON format: `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` - JSON Web Key (JWK): `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}`", + "type": "string" + }, + "signedJwt": { + "description": "The signed JWT. Contains the automatically generated header; the client-supplied payload; and the signature, which is generated using the key referenced by the `kid` field in the header. After the key pair referenced by the `key_id` response field expires, Google no longer exposes the public key that can be used to verify the JWT. As a result, the receiver can no longer verify the signature.", + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "IAM Service Account Credentials API", + "version": "v1", + "version_module": true +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go new file mode 100644 index 000000000..f0c694845 --- /dev/null +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -0,0 +1,868 @@ +// Copyright 2024 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package iamcredentials provides access to the IAM Service Account Credentials API. +// +// For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials +// +// # Library status +// +// These client libraries are officially supported by Google. 
However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/iamcredentials/v1" +// ... +// ctx := context.Background() +// iamcredentialsService, err := iamcredentials.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: +// +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See [google.golang.org/api/option.ClientOption] for details on options. +package iamcredentials // import "google.golang.org/api/iamcredentials/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version + +const apiId = "iamcredentials:v1" +const apiName = "iamcredentials" +const apiVersion = "v1" +const basePath = "https://iamcredentials.googleapis.com/" +const basePathTemplate = "https://iamcredentials.UNIVERSE_DOMAIN/" +const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // See, edit, configure, and delete your Google Cloud data and see the email + // address for your Google Account. + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/cloud-platform", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) 
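(Editorial aside, not part of the vendored diff.) The package documentation above describes `NewService` as the supported constructor, with `New(client)` kept only for compatibility. A short sketch of constructing the client with explicit options; the credentials file path is a placeholder:

```
// Editorial example only; not vendored code.
package example

import (
	"context"
	"log"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
	"google.golang.org/api/option"
)

// newIAMCredentialsService builds the generated client. Without options,
// NewService falls back to Application Default Credentials; here a credentials
// file is supplied explicitly as an illustration.
func newIAMCredentialsService() *iamcredentials.Service {
	ctx := context.Background()
	svc, err := iamcredentials.NewService(ctx,
		option.WithCredentialsFile("/path/to/service-account.json"))
	if err != nil {
		log.Fatal(err)
	}
	return svc
}
```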
+ opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.ServiceAccounts = NewProjectsServiceAccountsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + ServiceAccounts *ProjectsServiceAccountsService +} + +func NewProjectsServiceAccountsService(s *Service) *ProjectsServiceAccountsService { + rs := &ProjectsServiceAccountsService{s: s} + return rs +} + +type ProjectsServiceAccountsService struct { + s *Service +} + +type GenerateAccessTokenRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. This + // field is required for delegated requests + // (https://cloud.google.com/iam/help/credentials/delegated-request). For + // direct requests + // (https://cloud.google.com/iam/help/credentials/direct-request), which are + // more common, do not specify this field. Each service account must be granted + // the `roles/iam.serviceAccountTokenCreator` role on its next service account + // in the chain. The last service account in the chain must be granted the + // `roles/iam.serviceAccountTokenCreator` role on the service account that is + // specified in the `name` field of the request. The delegates must have the + // following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. + // The `-` wildcard character is required; replacing it with a project ID is + // invalid. + Delegates []string `json:"delegates,omitempty"` + // Lifetime: The desired lifetime duration of the access token in seconds. By + // default, the maximum allowed value is 1 hour. To set a lifetime of up to 12 + // hours, you can add the service account as an allowed value in an + // Organization Policy that enforces the + // `constraints/iam.allowServiceAccountCredentialLifetimeExtension` constraint. + // See detailed instructions at + // https://cloud.google.com/iam/help/credentials/lifetime If a value is not + // specified, the token's lifetime will be set to a default value of 1 hour. + Lifetime string `json:"lifetime,omitempty"` + // Scope: Required. Code to identify the scopes to be included in the OAuth 2.0 + // access token. 
See + // https://developers.google.com/identity/protocols/googlescopes for more + // information. At least one value required. + Scope []string `json:"scope,omitempty"` + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GenerateAccessTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type GenerateAccessTokenResponse struct { + // AccessToken: The OAuth 2.0 access token. + AccessToken string `json:"accessToken,omitempty"` + // ExpireTime: Token expiration time. The expiration time is always set. + ExpireTime string `json:"expireTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "AccessToken") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AccessToken") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GenerateAccessTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateAccessTokenResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenRequest struct { + // Audience: Required. The audience for the token, such as the API or account + // that this token grants access to. + Audience string `json:"audience,omitempty"` + // Delegates: The sequence of service accounts in a delegation chain. Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. + Delegates []string `json:"delegates,omitempty"` + // IncludeEmail: Include the service account email in the token. If set to + // `true`, the token will contain `email` and `email_verified` claims. + IncludeEmail bool `json:"includeEmail,omitempty"` + // ForceSendFields is a list of field names (e.g. "Audience") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Audience") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GenerateIdTokenRequest) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type GenerateIdTokenResponse struct { + // Token: The OpenId Connect ID token. + Token string `json:"token,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Token") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Token") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { + type NoMethod GenerateIdTokenResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SignBlobRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. + Delegates []string `json:"delegates,omitempty"` + // Payload: Required. The bytes to sign. + Payload string `json:"payload,omitempty"` + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SignBlobRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SignBlobResponse struct { + // KeyId: The ID of the key used to sign the blob. The key used for signing + // will remain valid for at least 12 hours after the blob is signed. 
To verify + // the signature, you can retrieve the public key in several formats from the + // following endpoints: - RSA public key wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL} + // ` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` + // - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}` + KeyId string `json:"keyId,omitempty"` + // SignedBlob: The signature for the blob. Does not include the original blob. + // After the key pair referenced by the `key_id` response field expires, Google + // no longer exposes the public key that can be used to verify the blob. As a + // result, the receiver can no longer verify the signature. + SignedBlob string `json:"signedBlob,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "KeyId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SignBlobResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignBlobResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SignJwtRequest struct { + // Delegates: The sequence of service accounts in a delegation chain. Each + // service account must be granted the `roles/iam.serviceAccountTokenCreator` + // role on its next service account in the chain. The last service account in + // the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on + // the service account that is specified in the `name` field of the request. + // The delegates must have the following format: + // `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard + // character is required; replacing it with a project ID is invalid. + Delegates []string `json:"delegates,omitempty"` + // Payload: Required. The JWT payload to sign. Must be a serialized JSON object + // that contains a JWT Claims Set. For example: `{"sub": "user@example.com", + // "iat": 313435}` If the JWT Claims Set contains an expiration time (`exp`) + // claim, it must be an integer timestamp that is not in the past and no more + // than 12 hours in the future. + Payload string `json:"payload,omitempty"` + // ForceSendFields is a list of field names (e.g. "Delegates") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Delegates") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s SignJwtRequest) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type SignJwtResponse struct { + // KeyId: The ID of the key used to sign the JWT. The key used for signing will + // remain valid for at least 12 hours after the JWT is signed. To verify the + // signature, you can retrieve the public key in several formats from the + // following endpoints: - RSA public key wrapped in an X.509 v3 certificate: + // `https://www.googleapis.com/service_accounts/v1/metadata/x509/{ACCOUNT_EMAIL} + // ` - Raw key in JSON format: + // `https://www.googleapis.com/service_accounts/v1/metadata/raw/{ACCOUNT_EMAIL}` + // - JSON Web Key (JWK): + // `https://www.googleapis.com/service_accounts/v1/metadata/jwk/{ACCOUNT_EMAIL}` + KeyId string `json:"keyId,omitempty"` + // SignedJwt: The signed JWT. Contains the automatically generated header; the + // client-supplied payload; and the signature, which is generated using the key + // referenced by the `kid` field in the header. After the key pair referenced + // by the `key_id` response field expires, Google no longer exposes the public + // key that can be used to verify the JWT. As a result, the receiver can no + // longer verify the signature. + SignedJwt string `json:"signedJwt,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "KeyId") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "KeyId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s SignJwtResponse) MarshalJSON() ([]byte, error) { + type NoMethod SignJwtResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ProjectsServiceAccountsGenerateAccessTokenCall struct { + s *Service + name string + generateaccesstokenrequest *GenerateAccessTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateAccessToken: Generates an OAuth 2.0 access token for a service +// account. +// +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. +func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { + c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateaccesstokenrequest = generateaccesstokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateAccessTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateAccessToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateAccessToken" call. +// Any non-2xx status code is an error. Response headers are in either +// *GenerateAccessTokenResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.CallOption) (*GenerateAccessTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateAccessTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsServiceAccountsGenerateIdTokenCall struct { + s *Service + name string + generateidtokenrequest *GenerateIdTokenRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GenerateIdToken: Generates an OpenID Connect ID token for a service account. +// +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. 
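(Editorial aside, not part of the vendored diff.) The request and response types above, together with the `GenerateAccessToken` call builder, follow the usual chain-then-`Do` pattern of this generated client. A sketch of minting a short-lived token; the service-account email and scope are placeholders:

```
// Editorial example only; not vendored code.
package example

import (
	"context"
	"fmt"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

// mintAccessToken names the target service account in the required
// `projects/-/serviceAccounts/...` form, then chains Context and Do on the
// generated call builder.
func mintAccessToken(ctx context.Context, svc *iamcredentials.Service) (string, error) {
	name := "projects/-/serviceAccounts/signer@my-project.iam.gserviceaccount.com"
	req := &iamcredentials.GenerateAccessTokenRequest{
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
		Lifetime: "3600s", // google-duration format; 1 hour is the default maximum
	}
	resp, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, req).
		Context(ctx).Do()
	if err != nil {
		return "", fmt.Errorf("generateAccessToken: %w", err)
	}
	return resp.AccessToken, nil
}
```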
+func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { + c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.generateidtokenrequest = generateidtokenrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGenerateIdTokenCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Context(ctx context.Context) *ProjectsServiceAccountsGenerateIdTokenCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:generateIdToken") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.generateIdToken" call. +// Any non-2xx status code is an error. Response headers are in either +// *GenerateIdTokenResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOption) (*GenerateIdTokenResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GenerateIdTokenResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsServiceAccountsSignBlobCall struct { + s *Service + name string + signblobrequest *SignBlobRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignBlob: Signs a blob using a service account's system-managed private key. 
+// +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. +func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { + c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signblobrequest = signblobrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsServiceAccountsSignBlobCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignBlobCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountsSignBlobCall) Context(ctx context.Context) *ProjectsServiceAccountsSignBlobCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signBlob") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signBlob" call. +// Any non-2xx status code is an error. Response headers are in either +// *SignBlobResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) (*SignBlobResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignBlobResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsServiceAccountsSignJwtCall struct { + s *Service + name string + signjwtrequest *SignJwtRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SignJwt: Signs a JWT using a service account's system-managed private key. +// +// - name: The resource name of the service account for which the credentials +// are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard +// character is required; replacing it with a project ID is invalid. +func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { + c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.signjwtrequest = signjwtrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsServiceAccountsSignJwtCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsSignJwtCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountsSignJwtCall) Context(ctx context.Context) *ProjectsServiceAccountsSignJwtCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:signJwt") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.signJwt" call. +// Any non-2xx status code is an error. Response headers are in either +// *SignJwtResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. 
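For orientation, here is a minimal, hypothetical caller-side sketch of how application code might exercise the generated surface above through the public `google.golang.org/api/iamcredentials/v1` package (the same code vendored here). The service-account email, JWT payload, and the `Payload`/`SignedJwt` field names are taken from the public API and are placeholders/assumptions, not part of this patch; credentials are assumed to come from Application Default Credentials.

```go
// Hypothetical usage sketch (not part of the vendored file): signing a JWT
// with the generated iamcredentials.projects.serviceAccounts.signJwt call.
package main

import (
	"context"
	"fmt"
	"log"

	iamcredentials "google.golang.org/api/iamcredentials/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := iamcredentials.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The `-` wildcard is required in the resource name; the email is a placeholder.
	name := "projects/-/serviceAccounts/demo-sa@example-project.iam.gserviceaccount.com"
	req := &iamcredentials.SignJwtRequest{
		Payload: `{"iss":"demo-sa@example-project.iam.gserviceaccount.com","aud":"https://example.com","exp":1893456000}`,
	}
	resp, err := svc.Projects.ServiceAccounts.SignJwt(name, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.SignedJwt)
}
```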
+func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (*SignJwtResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SignJwtResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go new file mode 100644 index 000000000..fbf4ef1c6 --- /dev/null +++ b/vendor/google.golang.org/api/internal/cba.go @@ -0,0 +1,318 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cba.go (certificate-based access) contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials +// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// If running within Google's cloud environment, and client certificate is not specified +// and not available through DCA, we will try mTLS with credentials held by +// the Secure Session Agent, which is part of Google's cloud infrastructure. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. + +// Package internal supports the options and transport packages. +package internal + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/url" + "os" + "strings" + + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/api/internal/cert" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. 
+ googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +) + +// getClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" { + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + // if settings.DefaultEndpointTemplate == "" { + // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com") + // } + endpoint = resolvedDefaultEndpoint(settings) + } + return clientCertSource, endpoint, nil +} + +type transportConfig struct { + clientCertSource cert.Source // The client certificate source. + endpoint string // The corresponding endpoint to use based on client certificate source. + s2aAddress string // The S2A address if it can be used, otherwise an empty string. + s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. +} + +func getTransportConfig(settings *DialSettings) (*transportConfig, error) { + clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) + if err != nil { + return nil, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: "", + s2aMTLSEndpoint: "", + } + + if !shouldUseS2A(clientCertSource, settings) { + return &defaultTransportConfig, nil + } + if !settings.IsUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS + } + + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: settings.DefaultMTLSEndpoint, + }, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. 
+ return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + if isMTLS(clientCertSource) { + if !settings.IsUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } + return settings.DefaultMTLSEndpoint, nil + } + return resolvedDefaultEndpoint(settings), nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if resolvedDefaultEndpoint(settings) == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint) +} + +func isMTLS(clientCertSource cert.Source) bool { + mtlsMode := getMTLSMode() + return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) +} + +// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the +// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the +// deprecated DefaultEndpoint value. +func resolvedDefaultEndpoint(settings *DialSettings) string { + if settings.DefaultEndpointTemplate == "" { + return settings.DefaultEndpoint + } + return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the +// corresponding endpoint to use for GRPC client. 
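The endpoint-merging behavior described in the getEndpoint comment above is easiest to see in isolation. The standalone sketch below copies the fixScheme/mergeEndpoints logic shown in this file so it can be run on its own; the URLs are arbitrary examples.

```go
// Standalone sketch of the merge getEndpoint performs when the user supplies
// a bare "host[:port]" override; mirrors the vendored helpers above.
package main

import (
	"fmt"
	"log"
	"net/url"
	"strings"
)

func fixScheme(baseURL string) string {
	if !strings.Contains(baseURL, "://") {
		return "https://" + baseURL
	}
	return baseURL
}

func mergeEndpoints(baseURL, newHost string) (string, error) {
	u, err := url.Parse(fixScheme(baseURL))
	if err != nil {
		return "", err
	}
	// Only the host portion of the default endpoint is swapped out.
	return strings.Replace(baseURL, u.Host, newHost, 1), nil
}

func main() {
	merged, err := mergeEndpoints("https://foo.googleapis.com/bar/baz", "myhost:8000")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(merged) // https://myhost:8000/bar/baz
}
```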
+func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, +// and the endpoint to use for HTTP client. +func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, nil, "", err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil +} + +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return !settings.EnableDirectPath && !settings.EnableDirectPathXds +} + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go new file mode 100644 index 000000000..21d025153 --- /dev/null +++ b/vendor/google.golang.org/api/internal/cert/default_cert.go @@ -0,0 +1,58 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. 
+// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certficate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. +type defaultCertData struct { + once sync.Once + source Source + err error +} + +var ( + defaultCert defaultCertData +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil Error are +// returned to indicate that a default certificate source is unavailable. +func DefaultSource() (Source, error) { + defaultCert.once.Do(func() { + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } + }) + return defaultCert.source, defaultCert.err +} diff --git a/vendor/google.golang.org/api/internal/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go new file mode 100644 index 000000000..1061b5f05 --- /dev/null +++ b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -0,0 +1,54 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certifcate related operations to an OS-specific "signer binary" +// that communicates with the native keystore (ex. keychain on MacOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. 
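Because Source is just a function with crypto/tls's GetClientCertificate signature, wiring it into an HTTP client is direct. The sketch below is illustrative only: it redeclares the Source type locally since the vendored cert package is internal and not importable by application code.

```go
// Sketch: plugging a cert.Source-shaped callback into crypto/tls. The type is
// redeclared locally; the real one lives in the internal cert package above.
package main

import (
	"crypto/tls"
	"net/http"
)

type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error)

func clientFor(src Source) *http.Client {
	cfg := &tls.Config{}
	if src != nil {
		// The signature matches GetClientCertificate exactly, so the
		// callback can be assigned directly.
		cfg.GetClientCertificate = src
	}
	return &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
}

func main() {
	// A nil Source (what DefaultSource yields when no provider is configured)
	// simply produces a client that sends no client certificate.
	_ = clientFor(nil)
}
```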
+func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go new file mode 100644 index 000000000..afd79ffe2 --- /dev/null +++ b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go @@ -0,0 +1,122 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context aware metadata file. +// If configFilePath is empty, use the default context aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported. + return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := os.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. 
+ return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 000000000..fedcce15b --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 69b8659fd..4ebeb61c1 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -1,55 +1,145 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package internal import ( "context" + "crypto/tls" "encoding/json" + "errors" "fmt" - "io/ioutil" + "net" + "net/http" + "os" + "time" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/oauth2adapt" "golang.org/x/oauth2" + "google.golang.org/api/internal/cert" + "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.IsNewAuthLibraryEnabled() { + return credsNewAuth(ctx, ds) + } + creds, err := baseCreds(ctx, ds) + if err != nil { + return nil, err + } + if ds.ImpersonationConfig != nil { + return impersonateCredentials(ctx, creds, ds) + } + return creds, nil +} + +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. +func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return "", nil, err + } + tokenURL := oAuth2Endpoint(clientCertSource) + var oauth2Client *http.Client + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + oauth2Client = customHTTPClient(tlsConfig) + } else { + oauth2Client = oauth2.NewClient(ctx, nil) + } + return tokenURL, oauth2Client, nil +} + +func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { + // Preserve old options behavior + if settings.InternalCredentials != nil { + return settings.InternalCredentials, nil + } else if settings.Credentials != nil { + return settings.Credentials, nil + } else if settings.TokenSource != nil { + return &google.Credentials{TokenSource: settings.TokenSource}, nil + } + + if settings.AuthCredentials != nil { + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil + } + + var useSelfSignedJWT bool + var aud string + var scopes []string + // If scoped JWTs are enabled user provided an aud, allow self-signed JWT. + if settings.EnableJwtWithScope || len(settings.Audiences) > 0 { + useSelfSignedJWT = true + } + + if len(settings.Scopes) > 0 { + scopes = make([]string, len(settings.Scopes)) + copy(scopes, settings.Scopes) + } + if len(settings.Audiences) > 0 { + aud = settings.Audiences[0] + } + // Only default scopes if user did not also set an audience. 
+ if len(settings.Scopes) == 0 && aud == "" && len(settings.DefaultScopes) > 0 { + scopes = make([]string, len(settings.DefaultScopes)) + copy(scopes, settings.DefaultScopes) + } + if len(scopes) == 0 && aud == "" { + aud = settings.DefaultAudience + } + + creds, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: scopes, + Audience: aud, + CredentialsFile: settings.CredentialsFile, + CredentialsJSON: settings.CredentialsJSON, + UseSelfSignedJWT: useSelfSignedJWT, + }) + if err != nil { + return nil, err + } + + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil +} + +func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.InternalCredentials != nil { + return ds.InternalCredentials, nil + } if ds.Credentials != nil { return ds.Credentials, nil } - if ds.CredentialsJSON != nil { - return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + if len(ds.CredentialsJSON) > 0 { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) + data, err := os.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } - return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, data, ds) } if ds.TokenSource != nil { return &google.Credentials{TokenSource: ds.TokenSource}, nil } - cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) if err != nil { return nil, err } if len(cred.JSON) > 0 { - return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, cred.JSON, ds) } // For GAE and GCE, the JSON is empty so return the default credentials directly. return cred, nil @@ -60,43 +150,149 @@ const ( serviceAccountKey = "service_account" ) -// credentialsFromJSON returns a google.Credentials based on the input. +// credentialsFromJSON returns a google.Credentials from the JSON data // -// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow -// - Otherwise, returns OAuth 2.0 flow. -func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { - cred, err := google.CredentialsFromJSON(ctx, data, scopes...) +// - A self-signed JWT flow will be executed if the following conditions are +// met: +// +// (1) At least one of the following is true: +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users +// (2) No service account impersontation +// +// - Otherwise, executes standard OAuth 2.0 flow +// More details: google.aip.dev/auth/4111 +func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds) if err != nil { return nil, err } - if len(data) > 0 && len(scopes) == 0 { - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. 
- } - if err := json.Unmarshal(cred.JSON, &f); err != nil { + params.TokenURL = tokenURL + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client) + + // By default, a standard OAuth 2.0 token source is created + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) + if err != nil { + return nil, err + } + + // Override the token source to use self-signed JWT if conditions are met + isJWTFlow, err := isSelfSignedJWTFlow(data, ds) + if err != nil { + return nil, err + } + if isJWTFlow { + ts, err := selfSignedJWTTokenSource(data, ds) + if err != nil { return nil, err } - if f.Type == serviceAccountKey { - ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) - if err != nil { - return nil, err - } - cred.TokenSource = ts - } + cred.TokenSource = ts } + return cred, err } -func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { - // Use the API endpoint as the default audience - audience := endpoint - if len(audiences) > 0 { - // TODO(shinfan): Update golang oauth to support multiple audiences. - if len(audiences) > 1 { - return nil, fmt.Errorf("multiple audiences support is not implemented") - } - audience = audiences[0] +func oAuth2Endpoint(clientCertSource cert.Source) string { + if isMTLS(clientCertSource) { + return google.MTLSTokenURL + } + return google.Endpoint.TokenURL +} + +func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs with scopes. + if !ds.IsUniverseDomainGDU() { + return typeServiceAccount(data) + } + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil { + return typeServiceAccount(data) + } + return false, nil +} + +// typeServiceAccount checks if JSON data is for a service account. +func typeServiceAccount(data []byte) (bool, error) { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil +} + +func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { + if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { + // Scopes are preferred in self-signed JWT unless the scope is not available + // or a custom audience is used. + return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...) + } else if ds.GetAudience() != "" { + // Fallback to audience if scope is not provided + return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience()) + } else { + return nil, errors.New("neither scopes or audience are available for the self-signed JWT") + } +} + +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. 
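The quota-project precedence described just above (explicit client option, then the GOOGLE_CLOUD_QUOTA_PROJECT environment variable, then the quota_project_id field of the credentials JSON) can be sketched without the google.Credentials type; the inputs below are illustrative.

```go
// Standalone sketch of GetQuotaProject's precedence order; the JSON field
// name mirrors the credentials file format used above.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func quotaProject(clientOpt string, credsJSON []byte) string {
	if clientOpt != "" {
		return clientOpt
	}
	if env := os.Getenv("GOOGLE_CLOUD_QUOTA_PROJECT"); env != "" {
		return env
	}
	var v struct {
		QuotaProject string `json:"quota_project_id"`
	}
	if err := json.Unmarshal(credsJSON, &v); err != nil {
		return ""
	}
	return v.QuotaProject
}

func main() {
	creds := []byte(`{"quota_project_id":"billing-project"}`)
	fmt.Println(quotaProject("", creds)) // "billing-project" unless the env var overrides it
}
```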
+func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(creds.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} + +func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { + if len(ds.ImpersonationConfig.Scopes) == 0 { + ds.ImpersonationConfig.Scopes = ds.GetScopes() + } + ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) + if err != nil { + return nil, err + } + return &google.Credentials{ + TokenSource: ts, + ProjectID: creds.ProjectID, + }, nil +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, } - return google.JWTAccessTokenSourceFromJSON(data, audience) } diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 000000000..886c6532b --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. +func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189..eab49a11e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. // A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. 
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { @@ -85,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -119,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback. +func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go index 837785081..13c2f9302 100644 --- a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go +++ b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package gensupport diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 0ef96b3f1..c048a5708 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -8,100 +8,18 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" "net/textproto" "strings" "sync" + "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. 
- err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatability, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -217,12 +135,13 @@ func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer // code only. type MediaInfo struct { // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater googleapi.ProgressUpdater + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater + chunkRetryDeadline time.Duration } // NewInfoFromMedia should be invoked from the Media method of a call. 
It returns a @@ -232,8 +151,12 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } + mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) return mi } @@ -242,7 +165,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -286,29 +213,49 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) - rm := ioutil.NopCloser(fm()) + rb := io.NopCloser(fb()) + rm := io.NopCloser(fm()) var mimeBoundary string if _, params, err := mime.ParseMediaType(ctype); err == nil { mimeBoundary = params["boundary"] } r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) + toCleanup = append(toCleanup, r) return r, nil } } - cleanup = func() { combined.Close() } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := io.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } @@ -347,6 +294,7 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { mi.progressUpdater(curr, mi.size) } }, + ChunkRetryDeadline: mi.chunkRetryDeadline, } } diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 0e878a425..6a5326c08 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -5,9 +5,11 @@ package gensupport import ( + "net/http" "net/url" "google.golang.org/api/googleapi" + "google.golang.org/api/internal" ) // URLParams is a simplified replacement for url.Values @@ -37,15 +39,40 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. 
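URLParams.Encode defers to net/url's url.Values, whose Encode sorts output by key, which is why the doc comment's example reads "bar=baz&foo=quux". A tiny standalone illustration of that behavior:

```go
// Illustration of the sorted encoding URLParams inherits from url.Values.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("foo", "quux")
	v.Set("bar", "baz")
	fmt.Println(v.Encode()) // bar=baz&foo=quux — keys are emitted in sorted order
}
```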
func (u URLParams) Encode() string { return url.Values(u).Encode() } -// SetOptions sets the URL params and any additional call options. +// SetOptions sets the URL params and any additional `CallOption` or +// `MultiCallOption` passed in. func SetOptions(u URLParams, opts ...googleapi.CallOption) { for _, o := range opts { + m, ok := o.(googleapi.MultiCallOption) + if ok { + u.SetMulti(m.GetMulti()) + continue + } u.Set(o.Get()) } } + +// SetHeaders sets common headers for all requests. The keyvals header pairs +// should have a corresponding value for every key provided. If there is an odd +// number of keyvals this method will panic. +func SetHeaders(userAgent, contentType string, userHeaders http.Header, keyvals ...string) http.Header { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+GoVersion()+" gdcl/"+internal.Version) + for i := 0; i < len(keyvals); i = i + 2 { + reqHeaders.Set(keyvals[i], keyvals[i+1]) + } + reqHeaders.Set("User-Agent", userAgent) + if contentType != "" { + reqHeaders.Set("Content-Type", contentType) + } + for k, v := range userHeaders { + reqHeaders[k] = v + } + return reqHeaders +} diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index e67ccd9a6..f828ddb60 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,32 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" - gax "github.com/googleapis/gax-go/v2" -) - -// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their -// own implementation. -type Backoff interface { - Pause() time.Duration -} - -// These are declared as global variables so that tests can overwrite them. -var ( - retryDeadline = 32 * time.Second - backoff = func() Backoff { - return &gax.Backoff{Initial: 100 * time.Millisecond} - } -) - -const ( - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -55,6 +35,18 @@ type ResumableUpload struct { // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. Callback func(int64) + + // Retry optionally configures retries for requests made against the upload. + Retry *RetryConfig + + // ChunkRetryDeadline configures the per-chunk deadline after which no further + // retries should happen. + ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metrics + // and idempotency headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -89,6 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). 
+ baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + + // Set idempotency token header which is used by GCS uploads. + req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -160,21 +161,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var shouldRetry = func(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - return err.Temporary() - } - return false - } // There are a couple of cases where it's possible for err and resp to both // be non-nil. However, we expose a simpler contract to our callers: exactly @@ -185,30 +171,74 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err if resp != nil && resp.Body != nil { resp.Body.Close() } + // If there were retries, indicate this in the error message and wrap the final error. + if rx.attempts > 1 { + return nil, fmt.Errorf("chunk upload failed after %d attempts;, final error: %w", rx.attempts, err) + } return nil, err } + // This case is very unlikely but possible only if rx.ChunkRetryDeadline is + // set to a very small value, in which case no requests will be sent before + // the deadline. Return an error to avoid causing a panic. + if resp == nil { + return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDealine", rx.URI) + } return resp, nil } + // Configure retryable error criteria. + errorFunc := rx.Retry.errorFunc() + + // Configure per-chunk retry deadline. + var retryDeadline time.Duration + if rx.ChunkRetryDeadline != 0 { + retryDeadline = rx.ChunkRetryDeadline + } else { + retryDeadline = defaultRetryDeadline + } // Send all chunks. for { var pause time.Duration - // Each chunk gets its own initialized-at-zero retry. - bo := backoff() - quitAfter := time.After(retryDeadline) + // Each chunk gets its own initialized-at-zero backoff and invocation ID. + bo := rx.Retry.backoff() + quitAfterTimer := time.NewTimer(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { + pauseTimer := time.NewTimer(pause) + select { + case <-ctx.Done(): + quitAfterTimer.Stop() + pauseTimer.Stop() + if err == nil { + err = ctx.Err() + } + return prepareReturn(resp, err) + case <-pauseTimer.C: + case <-quitAfterTimer.C: + pauseTimer.Stop() + return prepareReturn(resp, err) + } + pauseTimer.Stop() + + // Check for context cancellation or timeout once more. If more than one + // case in the select statement above was satisfied at the same time, Go + // will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before or the timeout was reached. 
select { case <-ctx.Done(): + quitAfterTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-time.After(pause): - case <-quitAfter: + case <-quitAfterTimer.C: return prepareReturn(resp, err) + default: } resp, err = rx.transferChunk(ctx) @@ -219,10 +249,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err } // Check if we should retry the request. - if !shouldRetry(status, err) { + if !errorFunc(status, err) { + quitAfterTimer.Stop() break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go new file mode 100644 index 000000000..089ee3189 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -0,0 +1,123 @@ +// Copyright 2021 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + "io" + "net" + "net/url" + "strings" + "time" + + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" +) + +// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their +// own implementation. +type Backoff interface { + Pause() time.Duration +} + +// These are declared as global variables so that tests can overwrite them. +var ( + // Default per-chunk deadline for resumable uploads. + defaultRetryDeadline = 32 * time.Second + // Default backoff timer. + backoff = func() Backoff { + return &gax.Backoff{Initial: 100 * time.Millisecond} + } +) + +const ( + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 + + // statusRequestTimeout is returned by the storage API if the + // upload connection was broken. The request should be retried. + statusRequestTimeout = 408 +) + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, unless a ShouldRetry func is specified by the RetryConfig instead. +// It follows guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests || status == statusRequestTimeout { + return true + } + if errors.Is(err, io.ErrUnexpectedEOF) { + return true + } + if errors.Is(err, net.ErrClosed) { + return true + } + switch e := err.(type) { + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + case interface{ Temporary() bool }: + if e.Temporary() { + return true + } + } + + // If error unwrapping is available, use this to examine wrapped + // errors. + if e, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, e.Unwrap()) + } + return false +} + +// RetryConfig allows configuration of backoff timing and retryable errors. 
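The retry loops above pace themselves with gax.Backoff, and the default backoff constructed in this file starts at 100ms with gax's built-in Max and Multiplier. A minimal sketch of that pacing, assuming only the public gax-go/v2 module that the vendored code already imports:

```go
// Sketch of the exponential, jittered pauses produced by the default backoff
// the resumable-upload retry code falls back to.
package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	bo := &gax.Backoff{Initial: 100 * time.Millisecond}
	for i := 0; i < 5; i++ {
		// Pause returns the next delay and grows the cap toward Max
		// (gax substitutes defaults for zero-valued Max and Multiplier).
		fmt.Println(bo.Pause())
	}
}
```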
+type RetryConfig struct { + Backoff *gax.Backoff + ShouldRetry func(err error) bool +} + +// Get a new backoff object based on the configured values. +func (r *RetryConfig) backoff() Backoff { + if r == nil || r.Backoff == nil { + return backoff() + } + return &gax.Backoff{ + Initial: r.Backoff.Initial, + Max: r.Backoff.Max, + Multiplier: r.Backoff.Multiplier, + } +} + +// This is kind of hacky; it is necessary because ShouldRetry expects to +// handle HTTP errors via googleapi.Error, but the error has not yet been +// wrapped with a googleapi.Error at this layer, and the ErrorFunc type +// in the manual layer does not pass in a status explicitly as it does +// here. So, we must wrap error status codes in a googleapi.Error so that +// ShouldRetry can parse this correctly. +func (r *RetryConfig) errorFunc() func(status int, err error) bool { + if r == nil || r.ShouldRetry == nil { + return shouldRetry + } + return func(status int, err error) bool { + if status >= 400 { + return r.ShouldRetry(&googleapi.Error{Code: status}) + } + return r.ShouldRetry(err) + } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 579939309..f6716134e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,24 +8,35 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" + "time" + + "github.com/google/uuid" + "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) -// Hook is the type of a function that is called once before each HTTP request -// that is sent by a generated API. It returns a function that is called after -// the request returns. -// Hooks are not called if the context is nil. -type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) - -var hooks []Hook - -// RegisterHook registers a Hook to be called before each HTTP request by a -// generated API. Hooks are called in the order they are registered. Each -// hook can return a function; if it is non-nil, it is called after the HTTP -// request returns. These functions are called in the reverse order. -// RegisterHook should not be called concurrently with itself or SendRequest. -func RegisterHook(h Hook) { - hooks = append(hooks, h) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) } // SendRequest sends a single HTTP request using the given client. @@ -33,6 +44,32 @@ func RegisterHook(h Hook) { // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + if k == "x-goog-api-client" { + // Merge all values into a single "x-goog-api-client" header. 
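Because `errorFunc` above wraps a bare HTTP status into a `*googleapi.Error` before invoking a caller-supplied `ShouldRetry`, such a predicate can recover the status with `errors.As`. A hedged sketch of what a custom predicate might look like; the function name and the chosen status codes are illustrative, not part of this package:

```
package main

import (
	"errors"
	"fmt"
	"io"

	"google.golang.org/api/googleapi"
)

// retryOn429And5xx is an illustrative ShouldRetry-style predicate: it retries
// rate-limit and server errors plus unexpected EOFs from broken connections.
func retryOn429And5xx(err error) bool {
	if err == nil {
		return false
	}
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		return gerr.Code == 429 || (gerr.Code >= 500 && gerr.Code <= 599)
	}
	return errors.Is(err, io.ErrUnexpectedEOF)
}

func main() {
	fmt.Println(retryOn429And5xx(&googleapi.Error{Code: 503})) // true
	fmt.Println(retryOn429And5xx(&googleapi.Error{Code: 404})) // false
	fmt.Println(retryOn429And5xx(io.ErrUnexpectedEOF))         // true
}
```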
+ var mergedVal strings.Builder + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + if baseXGoogHeader != "" { + mergedVal.WriteString(baseXGoogHeader) + mergedVal.WriteRune(' ') + } + for _, v := range vals { + mergedVal.WriteString(v) + mergedVal.WriteRune(' ') + } + // Remove the last space and replace the header on the request. + req.Header.Set(k, mergedVal.String()[:mergedVal.Len()-1]) + } else { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -41,23 +78,7 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx == nil { return client.Do(req) } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request. - resp, err := send(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err + return send(ctx, client, req) } func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { @@ -77,6 +98,122 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re return resp, err } +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + return sendAndRetry(ctx, client, req, retry) +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + attempts := 1 + invocationID := uuid.New().String() + + xGoogHeaderVals := req.Header.Values("X-Goog-Api-Client") + baseXGoogHeader := strings.Join(xGoogHeaderVals, " ") + + // Loop to retry the request, up to the context deadline. + var pause time.Duration + var bo Backoff + if retry != nil && retry.Backoff != nil { + bo = &gax.Backoff{ + Initial: retry.Backoff.Initial, + Max: retry.Backoff.Max, + Multiplier: retry.Backoff.Multiplier, + } + } else { + bo = backoff() + } + + var errorFunc = retry.errorFunc() + + for { + t := time.NewTimer(pause) + select { + case <-ctx.Done(): + t.Stop() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. 
+ if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} + } + return resp, ctx.Err() + case <-t.C: + } + + if ctx.Err() != nil { + // Check for context cancellation once more. If more than one case in a + // select is satisfied at the same time, Go will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} + } + return resp, ctx.Err() + } + + // Set retry metrics and idempotency headers for GCS. + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) + req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !errorFunc(status, err) { + break + } + attempts++ + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + // DecodeResponse decodes the body of res into target. If there is no body, // target is unchanged. func DecodeResponse(target interface{}, res *http.Response) error { diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go new file mode 100644 index 000000000..23f6aa24e --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/version.go @@ -0,0 +1,53 @@ +// Copyright 2020 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "runtime" + "strings" + "unicode" +) + +// GoVersion returns the Go runtime version. The returned string +// has no whitespace. +func GoVersion() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go new file mode 100644 index 000000000..4b2c775f2 --- /dev/null +++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -0,0 +1,127 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
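Note that `sendAndRetry` above only retries when `req.GetBody` is non-nil, since the body must be re-created for each attempt. A small sketch of when the standard library populates `GetBody` automatically; the URL and payload are placeholders:

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// A buffered body (*bytes.Reader, *bytes.Buffer, *strings.Reader) lets
	// net/http populate GetBody, so the request can be replayed on retry.
	buffered, _ := http.NewRequest("POST", "https://example.com/upload", bytes.NewReader([]byte("payload")))
	fmt.Println("buffered body, replayable:", buffered.GetBody != nil) // true

	// An arbitrary stream leaves GetBody nil; the retry loop above would
	// stop after the first attempt in that case.
	streamed, _ := http.NewRequest("POST", "https://example.com/upload", io.NopCloser(strings.NewReader("payload")))
	fmt.Println("streamed body, replayable:", streamed.GetBody != nil) // false
}
```

In practice this means callers who want retryable requests should hand `http.NewRequest` a `*bytes.Reader`, `*bytes.Buffer`, or `*strings.Reader` rather than an arbitrary stream.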
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// Config for generating impersonated credentials. +type Config struct { + // Target is the service account to impersonate. Required. + Target string + // Scopes the impersonated credential should have. Required. + Scopes []string + // Delegates are the service accounts in a delegation chain. Each service + // account must be granted roles/iam.serviceAccountTokenCreator on the next + // service account in the chain. Optional. + Delegates []string +} + +// TokenSource returns an impersonated TokenSource configured with the provided +// config using ts as the base credential provider for making requests. +func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + its := impersonatedTokenSource{ + ctx: ctx, + ts: ts, + name: formatIAMServiceAccountName(config.Target), + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically. + lifetime: "3600s", + } + + its.delegates = make([]string, len(config.Delegates)) + for i, v := range config.Delegates { + its.delegates[i] = formatIAMServiceAccountName(v) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + name string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. 
+func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + hc := oauth2.NewClient(i.ctx, i.ts) + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req = req.WithContext(i.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index a4426dcb7..000000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "errors" - - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. -type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. 
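`TokenSource` above hands this `Token` method to `oauth2.ReuseTokenSource`, so the IAM Credentials API is only called when the cached token is missing or near expiry. A minimal sketch of that caching behaviour with a stand-in token source; `countingSource` is illustrative:

```
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// countingSource is an illustrative TokenSource that counts how often its
// Token method is actually invoked.
type countingSource struct{ calls int }

func (c *countingSource) Token() (*oauth2.Token, error) {
	c.calls++
	return &oauth2.Token{
		AccessToken: fmt.Sprintf("token-%d", c.calls),
		Expiry:      time.Now().Add(time.Hour), // valid for an hour
	}, nil
}

func main() {
	src := &countingSource{}
	ts := oauth2.ReuseTokenSource(nil, src)

	for i := 0; i < 3; i++ {
		tok, _ := ts.Token()
		fmt.Println(tok.AccessToken)
	}
	// The underlying source is only hit once; the cached token is reused
	// until it approaches its Expiry.
	fmt.Println("underlying calls:", src.calls) // 1
}
```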
-func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("no endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. -func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -// Close releases resources associated with the pool and causes Next to unblock. -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go new file mode 100644 index 000000000..c70f2419b --- /dev/null +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "log" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" + +// The period an MTLS config can be reused before needing refresh. +var configExpiry = time.Hour + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + once sync.Once +) + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + once.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
+type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. 
+type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json deleted file mode 100644 index 6b36a9296..000000000 --- a/vendor/google.golang.org/api/internal/service-account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "project_id", - "private_key_id": "private_key_id", - "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", - "client_email": "xyz@developer.gserviceaccount.com", - "client_id": "123", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 062301c65..32949cccb 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -1,54 +1,120 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
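For illustration, the structs above decode a metadata-server response shaped like the JSON below. This is a sketch with mirrored local types and a made-up address value; the real data comes from the `instance/platform-security/auto-mtls-configuration` suffix queried earlier in this file:

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local mirrors of the unexported config structs above, for illustration only.
type s2aAddresses struct {
	PlaintextAddress string `json:"plaintext_address"`
	MTLSAddress      string `json:"mtls_address"`
}

type mtlsConfig struct {
	S2A    *s2aAddresses `json:"s2a"`
	Expiry time.Time
}

func main() {
	// Illustrative payload in the shape the metadata endpoint is expected to return.
	payload := `{"s2a": {"plaintext_address": "metadata.google.internal:80", "mtls_address": ""}}`

	var cfg mtlsConfig
	if err := json.Unmarshal([]byte(payload), &cfg); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	cfg.Expiry = time.Now().Add(time.Hour) // cached configs are refreshed after configExpiry

	fmt.Println("plaintext S2A address:", cfg.S2A.PlaintextAddress)
}
```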
-// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package internal supports the options and transport packages. package internal import ( + "crypto/tls" "errors" "net/http" + "os" + "strconv" + "time" + "cloud.google.com/go/auth" "golang.org/x/oauth2" "golang.org/x/oauth2/google" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) +const ( + newAuthLibEnvVar = "GOOGLE_API_GO_EXPERIMENTAL_ENABLE_NEW_AUTH_LIB" + newAuthLibDisabledEnVar = "GOOGLE_API_GO_EXPERIMENTAL_DISABLE_NEW_AUTH_LIB" + universeDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + defaultUniverseDomain = "googleapis.com" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - NoAuth bool - + Endpoint string + DefaultEndpoint string + DefaultEndpointTemplate string + DefaultMTLSEndpoint string + Scopes []string + DefaultScopes []string + EnableJwtWithScope bool + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + InternalCredentials *google.Credentials + UserAgent string + APIKey string + Audiences []string + DefaultAudience string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} + SkipValidation bool + ImpersonationConfig *impersonate.Config + EnableDirectPath bool + EnableDirectPathXds bool + AllowNonDefaultServiceAccount bool + DefaultUniverseDomain string + UniverseDomain string // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters QuotaProject string RequestReason string + + // New Auth library Options + AuthCredentials *auth.Credentials + EnableNewAuthLibrary bool +} + +// GetScopes returns the user-provided scopes, if set, or else falls back to the +// default scopes. +func (ds *DialSettings) GetScopes() []string { + if len(ds.Scopes) > 0 { + return ds.Scopes + } + return ds.DefaultScopes +} + +// GetAudience returns the user-provided audience, if set, or else falls back to the default audience. +func (ds *DialSettings) GetAudience() string { + if ds.HasCustomAudience() { + return ds.Audiences[0] + } + return ds.DefaultAudience +} + +// HasCustomAudience returns true if a custom audience is provided by users. +func (ds *DialSettings) HasCustomAudience() bool { + return len(ds.Audiences) > 0 +} + +// IsNewAuthLibraryEnabled returns true if the new auth library should be used. +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + // Disabled env is for future rollouts to make sure there is a way to easily + // disable this behaviour once we switch in on by default. 
+ if b, err := strconv.ParseBool(os.Getenv(newAuthLibDisabledEnVar)); err == nil && b { + return false + } + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnvVar)); err == nil { + return b + } + return false } // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { + if ds.SkipValidation { + return nil + } hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil if ds.NoAuth && hasCreds { return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") @@ -60,7 +126,7 @@ func (ds *DialSettings) Validate() error { if ds.Credentials != nil { nCreds++ } - if ds.CredentialsJSON != nil { + if len(ds.CredentialsJSON) > 0 { nCreds++ } if ds.CredentialsFile != "" { @@ -79,6 +145,12 @@ func (ds *DialSettings) Validate() error { if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { return errors.New("multiple credential options provided") } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } if ds.HTTPClient != nil && ds.GRPCConn != nil { return errors.New("WithHTTPClient is incompatible with WithGRPCConn") } @@ -91,6 +163,80 @@ func (ds *DialSettings) Validate() error { if ds.HTTPClient != nil && ds.RequestReason != "" { return errors.New("WithHTTPClient is incompatible with RequestReason") } - + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } + if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { + return errors.New("WithImpersonatedCredentials requires scopes being provided") + } return nil } + +// GetDefaultUniverseDomain returns the Google default universe domain +// ("googleapis.com"). +func (ds *DialSettings) GetDefaultUniverseDomain() string { + return defaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +func (ds *DialSettings) GetUniverseDomain() string { + if ds.UniverseDomain != "" { + return ds.UniverseDomain + } + if envUD := os.Getenv(universeDomainEnvVar); envUD != "" { + return envUD + } + return defaultUniverseDomain +} + +// IsUniverseDomainGDU returns true if the universe domain is the default Google +// universe ("googleapis.com"). +func (ds *DialSettings) IsUniverseDomainGDU() bool { + return ds.GetUniverseDomain() == defaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, from google.Credentials. This wrapper function should be removed +// to close https://github.com/googleapis/google-api-go-client/issues/2399. 
+func GetUniverseDomain(creds *google.Credentials) (string, error) { + timer := time.NewTimer(time.Second) + defer timer.Stop() + errors := make(chan error) + results := make(chan string) + + go func() { + result, err := creds.GetUniverseDomain() + if err != nil { + errors <- err + return + } + results <- result + }() + + select { + case <-errors: + // An error that is returned before the timer expires is likely to be + // connection refused. Temporarily (2024-03-21) return the GDU domain. + return defaultUniverseDomain, nil + case res := <-results: + return res, nil + case <-timer.C: // Timer is expired. + // If err or res was not returned, it means that creds.GetUniverseDomain() + // did not complete in 1s. Assume that MDS is likely never responding to + // the endpoint and will timeout. This is the source of issues such as + // https://github.com/googleapis/google-cloud-go/issues/9350. + // Temporarily (2024-02-02) return the GDU domain. Restore the original + // calls to creds.GetUniverseDomain() in grpc/dial.go and http/dial.go + // and remove this method to close + // https://github.com/googleapis/google-api-go-client/issues/2399. + return defaultUniverseDomain, nil + } +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE new file mode 100644 index 000000000..7109c6ef9 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Joshua Tacoma. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 000000000..c7f86fcd5 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." 
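The wrapper above guards a potentially slow `creds.GetUniverseDomain` call with a one-second timer and falls back to the default domain. A generic sketch of that timeout-guard pattern, assuming an illustrative `callWithFallback` helper and a deliberately slow function:

```
package main

import (
	"fmt"
	"time"
)

// callWithFallback runs fn in a goroutine and returns its result, or the
// fallback value if fn has not answered within timeout. The helper and the
// slow function below are illustrative, not part of the vendored package.
func callWithFallback(fn func() (string, error), timeout time.Duration, fallback string) (string, error) {
	results := make(chan string, 1)
	errs := make(chan error, 1)
	go func() {
		res, err := fn()
		if err != nil {
			errs <- err
			return
		}
		results <- res
	}()

	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case res := <-results:
		return res, nil
	case <-errs:
		return fallback, nil // treat early errors as "use the default domain"
	case <-timer.C:
		return fallback, nil // slow or unreachable metadata server
	}
}

func main() {
	slow := func() (string, error) {
		time.Sleep(2 * time.Second)
		return "example.universe", nil
	}
	domain, _ := callWithFallback(slow, time.Second, "googleapis.com")
	fmt.Println(domain) // googleapis.com
}
```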
+ +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go similarity index 98% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go index 63bf05383..8c27d19d7 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -191,7 +191,7 @@ func parseTerm(term string) (result templateTerm, err error) { err = errors.New("not a valid name: " + result.name) } if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") + err = errors.New("both explode and prefix modifiers on same term") } return result, err } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go similarity index 100% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go new file mode 100644 index 000000000..df50575b9 --- /dev/null +++ b/vendor/google.golang.org/api/internal/version.go @@ -0,0 +1,8 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// Version is the current tagged release of the library. +const Version = "0.199.0" diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 000000000..1799b5d9a --- /dev/null +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,227 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. +package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. +var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. +type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. 
+ Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. + bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. +// +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. +func (pi *PageInfo) Remaining() int { return pi.bufLen() } + +// next provides support for an iterator's Next function. An iterator's Next +// should return the error returned by next if non-nil; else it can assume +// there is at least one item in its buffer, and it should return that item and +// remove it from the buffer. +func (pi *PageInfo) next() error { + pi.nextCalled = true + if pi.err != nil { // Once we get an error, always return it. + // TODO(jba): fix so users can retry on transient errors? Probably not worth it. 
+ return pi.err + } + if pi.nextPageCalled { + pi.err = errMixed + return pi.err + } + // Loop until we get some items or reach the end. + for pi.bufLen() == 0 && !pi.atEnd { + if err := pi.fill(pi.MaxSize); err != nil { + pi.err = err + return pi.err + } + if pi.Token == "" { + pi.atEnd = true + } + } + // Either the buffer is non-empty or pi.atEnd is true (or both). + if pi.bufLen() == 0 { + // The buffer is empty and pi.atEnd is true, i.e. the service has no + // more items. + pi.err = Done + } + return pi.err +} + +// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the +// next-page token returned by the call. +// If fill returns a non-nil error, the buffer will be empty. +func (pi *PageInfo) fill(size int) error { + tok, err := pi.fetch(size, pi.Token) + if err != nil { + pi.takeBuf() // clear the buffer + return err + } + pi.Token = tok + return nil +} + +// Pageable is implemented by iterators that support paging. +type Pageable interface { + // PageInfo returns paging information associated with the iterator. + PageInfo() *PageInfo +} + +// Pager supports retrieving iterator items a page at a time. +type Pager struct { + pageInfo *PageInfo + pageSize int +} + +// NewPager returns a pager that uses iter. Calls to its NextPage method will +// obtain exactly pageSize items, unless fewer remain. The pageToken argument +// indicates where to start the iteration. Pass the empty string to start at +// the beginning, or pass a token retrieved from a call to Pager.NextPage. +// +// If you use an iterator with a Pager, you must not call Next on the iterator. +func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { + p := &Pager{ + pageInfo: iter.PageInfo(), + pageSize: pageSize, + } + p.pageInfo.Token = pageToken + if pageSize <= 0 { + p.pageInfo.err = errors.New("iterator: page size must be positive") + } + return p +} + +// NextPage retrieves a sequence of items from the iterator and appends them +// to slicep, which must be a pointer to a slice of the iterator's item type. +// Exactly p.pageSize items will be appended, unless fewer remain. +// +// The first return value is the page token to use for the next page of items. +// If empty, there are no more pages. Aside from checking for the end of the +// iteration, the returned page token is only needed if the iteration is to be +// resumed a later time, in another context (possibly another process). +// +// The second return value is non-nil if an error occurred. It will never be +// the special iterator sentinel value Done. To recognize the end of the +// iteration, compare nextPageToken to the empty string. +// +// It is possible for NextPage to return a single zero-length page along with +// an empty page token when there are no more items in the iteration. +func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. 
+ wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 000000000..e6b5c1025 --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,214 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. +package internaloption + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. +// +// Deprecated: WithDefaultEndpoint does not support setting the universe domain. +// Use WithDefaultEndpointTemplate and WithDefaultUniverseDomain to compose the +// default endpoint instead. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} + +type defaultEndpointTemplateOption string + +func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpointTemplate = string(o) +} + +// WithDefaultEndpointTemplate provides a template for creating the endpoint +// using a universe domain. See also WithDefaultUniverseDomain and +// option.WithUniverseDomain. The placeholder UNIVERSE_DOMAIN should be used +// instead of a concrete universe domain such as "googleapis.com". +// +// Example: WithDefaultEndpointTemplate("https://logging.UNIVERSE_DOMAIN/") +// +// It should only be used internally by generated clients. +func WithDefaultEndpointTemplate(url string) option.ClientOption { + return defaultEndpointTemplateOption(url) +} + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} + +// SkipDialSettingsValidation bypasses validation on ClientOptions. +// +// It should only be used internally. 
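As a usage note for the vendored iterator package: callers either loop on `Next` until the `Done` sentinel, or hand an iterator to a `Pager`, never both on the same iterator. A sketch assuming a `cloud.google.com/go/storage` client, Application Default Credentials, and a placeholder project ID:

```
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	const projectID = "my-project" // placeholder project ID

	// Plain iteration: Next returns iterator.Done when nothing is left.
	it := client.Buckets(ctx, projectID)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}

	// Paged iteration on a fresh iterator: NextPage appends up to pageSize
	// items to the slice and reports the token for the following page.
	pager := iterator.NewPager(client.Buckets(ctx, projectID), 50, "")
	var page []*storage.BucketAttrs
	nextToken, err := pager.NextPage(&page)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d buckets, next page token %q\n", len(page), nextToken)
}
```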
+func SkipDialSettingsValidation() option.ClientOption { + return skipDialSettingsValidation{} +} + +type skipDialSettingsValidation struct{} + +func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { + settings.SkipValidation = true +} + +// EnableDirectPath returns a ClientOption that overrides the default +// attempt to use DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPath(dp bool) option.ClientOption { + return enableDirectPath(dp) +} + +type enableDirectPath bool + +func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) +} + +// EnableDirectPathXds returns a ClientOption that overrides the default +// DirectPath type. It is only valid when DirectPath is enabled. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPathXds() option.ClientOption { + return enableDirectPathXds(true) +} + +type enableDirectPathXds bool + +func (x enableDirectPathXds) Apply(o *internal.DialSettings) { + o.EnableDirectPathXds = bool(x) +} + +// AllowNonDefaultServiceAccount returns a ClientOption that overrides the default +// requirement for using the default service account for DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func AllowNonDefaultServiceAccount(nd bool) option.ClientOption { + return allowNonDefaultServiceAccount(nd) +} + +type allowNonDefaultServiceAccount bool + +func (a allowNonDefaultServiceAccount) Apply(o *internal.DialSettings) { + o.AllowNonDefaultServiceAccount = bool(a) +} + +// WithDefaultAudience returns a ClientOption that specifies a default audience +// to be used as the audience field ("aud") for the JWT token authentication. +// +// It should only be used internally by generated clients. +func WithDefaultAudience(audience string) option.ClientOption { + return withDefaultAudience(audience) +} + +type withDefaultAudience string + +func (w withDefaultAudience) Apply(o *internal.DialSettings) { + o.DefaultAudience = string(w) +} + +// WithDefaultScopes returns a ClientOption that overrides the default OAuth2 +// scopes to be used for a service. +// +// It should only be used internally by generated clients. +func WithDefaultScopes(scope ...string) option.ClientOption { + return withDefaultScopes(scope) +} + +type withDefaultScopes []string + +func (w withDefaultScopes) Apply(o *internal.DialSettings) { + o.DefaultScopes = make([]string, len(w)) + copy(o.DefaultScopes, w) +} + +// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain. +// +// It should only be used internally by generated clients. +// +// This is similar to the public WithUniverse, but allows us to determine whether the user has +// overridden the default universe. +func WithDefaultUniverseDomain(ud string) option.ClientOption { + return withDefaultUniverseDomain(ud) +} + +type withDefaultUniverseDomain string + +func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { + o.DefaultUniverseDomain = string(w) +} + +// EnableJwtWithScope returns a ClientOption that specifies if scope can be used +// with self-signed JWT. +// +// EnableJwtWithScope is ignored when option.WithUniverseDomain is set +// to a value other than the Google Default Universe (GDU) of "googleapis.com". 
+// For non-GDU domains, token exchange is impossible and services must +// support self-signed JWTs with scopes. +func EnableJwtWithScope() option.ClientOption { + return enableJwtWithScope(true) +} + +type enableJwtWithScope bool + +func (w enableJwtWithScope) Apply(o *internal.DialSettings) { + o.EnableJwtWithScope = bool(w) +} + +// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls. +// This credential takes precedence over all other credential options. +func WithCredentials(creds *google.Credentials) option.ClientOption { + return (*withCreds)(creds) +} + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.InternalCredentials = (*google.Credentials)(w) +} + +// EnableNewAuthLibrary returns a ClientOption that specifies if libraries in this +// module to delegate auth to our new library. This option will be removed in +// the future once all clients have been moved to the new auth layer. +func EnableNewAuthLibrary() option.ClientOption { + return enableNewAuthLibrary(true) +} + +type enableNewAuthLibrary bool + +func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { + o.EnableNewAuthLibrary = bool(w) +} + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. +type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 0a1c2dba9..23aba01b6 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -1,25 +1,19 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package option contains options for Google API clients. package option import ( + "crypto/tls" "net/http" + "cloud.google.com/go/auth" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) @@ -89,6 +83,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -100,7 +97,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. 
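`EmbeddableAdapter` above exists so client libraries can define options that only they understand while still satisfying `option.ClientOption`. A hedged sketch of that embedding pattern; the package, type, and `WithFancyMode` option are hypothetical:

```
package myclient

import (
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// withFancyMode is a hypothetical client-specific option. Embedding
// internaloption.EmbeddableAdapter supplies a no-op Apply for the shared
// DialSettings, so the type satisfies option.ClientOption while carrying
// data that only this client library knows how to read back.
type withFancyMode struct {
	internaloption.EmbeddableAdapter
	enabled bool
}

// WithFancyMode returns a ClientOption understood only by this package; the
// client would type-assert for *withFancyMode when processing its options.
func WithFancyMode(enabled bool) option.ClientOption {
	return &withFancyMode{enabled: enabled}
}
```

The embedded adapter keeps `Apply` a no-op for the generic dial settings, so such options pass through the shared plumbing untouched.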
If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } @@ -124,7 +123,7 @@ func (w withHTTPClient) Apply(o *internal.DialSettings) { } // WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option many only be +// connection to use as the basis of communications. This option may only be // used with services that support gRPC as their communication transport. When // used, the WithGRPCConn option takes precedent over all other supplied // options. @@ -152,7 +151,6 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. -// This is an EXPERIMENTAL API and may be changed or removed in the future. func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) } @@ -160,8 +158,7 @@ func WithGRPCConnectionPool(size int) ClientOption { type withGRPCConnectionPool int func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) + o.GRPCConnPoolSize = int(w) } // WithAPIKey returns a ClientOption that specifies an API key to be used @@ -233,3 +230,143 @@ type withRequestReason string func (w withRequestReason) Apply(o *internal.DialSettings) { o.RequestReason = string(w) } + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. +func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabled{} +} + +type withTelemetryDisabled struct{} + +func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} + +// ClientCertSource is a function that returns a TLS client certificate to be used +// when opening TLS connections. +// +// It follows the same semantics as crypto/tls.Config.GetClientCertificate. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// WithClientCertSource returns a ClientOption that specifies a +// callback function for obtaining a TLS client certificate. +// +// This option is used for supporting mTLS authentication, where the +// server validates the client certifcate when establishing a connection. +// +// The callback function will be invoked whenever the server requests a +// certificate from the client. Implementations of the callback function +// should try to ensure that a valid certificate can be repeatedly returned +// on demand for the entire life cycle of the transport client. If a nil +// Certificate is returned (i.e. no Certificate can be obtained), an error +// should be returned. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithClientCertSource(s ClientCertSource) ClientOption { + return withClientCertSource{s} +} + +type withClientCertSource struct{ s ClientCertSource } + +func (w withClientCertSource) Apply(o *internal.DialSettings) { + o.ClientCertSource = w.s +} + +// ImpersonateCredentials returns a ClientOption that will impersonate the +// target service account. 
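The revised `WithUserAgent` comment above points custom-client users at RoundTripper middleware. A sketch of that approach, assuming the generated `storage/v1` client and a placeholder user agent; note that a client supplied via `WithHTTPClient` is used as-is, so it must also handle authentication itself:

```
package main

import (
	"context"
	"log"
	"net/http"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

// userAgentTransport sets a User-Agent header on every request before
// delegating to the wrapped RoundTripper.
type userAgentTransport struct {
	base http.RoundTripper
	ua   string
}

func (t *userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	r := req.Clone(req.Context())
	r.Header.Set("User-Agent", t.ua)
	return t.base.RoundTrip(r)
}

func main() {
	ctx := context.Background()
	hc := &http.Client{Transport: &userAgentTransport{base: http.DefaultTransport, ua: "my-app/1.0"}}

	// With WithHTTPClient the supplied client is used as-is: WithUserAgent
	// would be ignored, and the client is also responsible for auth.
	svc, err := storage.NewService(ctx, option.WithHTTPClient(hc))
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}
```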
+// +// In order to impersonate the target service account +// the base service account must have the Service Account Token Creator role, +// roles/iam.serviceAccountTokenCreator, on the target service account. +// See https://cloud.google.com/iam/docs/understanding-service-accounts. +// +// Optionally, delegates can be used during impersonation if the base service +// account lacks the token creator role on the target. When using delegates, +// each service account must be granted roles/iam.serviceAccountTokenCreator +// on the next service account in the chain. +// +// For example, if a base service account of SA1 is trying to impersonate target +// service account SA2 while using delegate service accounts DSA1 and DSA2, +// the following must be true: +// +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// +// The resulting impersonated credential will either have the default scopes of +// the client being instantiating or the scopes from WithScopes if provided. +// Scopes are required for creating impersonated credentials, so if this option +// is used while not using a NewClient/NewService function, WithScopes must also +// be explicitly passed in as well. +// +// If the base credential is an authorized user and not a service account, or if +// the option WithQuotaProject is set, the target service account must have a +// role that grants the serviceusage.services.use permission such as +// roles/serviceusage.serviceUsageConsumer. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +// +// Deprecated: This option has been replaced by `impersonate` package: +// `google.golang.org/api/impersonate`. Please use the `impersonate` package +// instead with the WithTokenSource option. +func ImpersonateCredentials(target string, delegates ...string) ClientOption { + return impersonateServiceAccount{ + target: target, + delegates: delegates, + } +} + +type impersonateServiceAccount struct { + target string + delegates []string +} + +func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { + o.ImpersonationConfig = &impersonate.Config{ + Target: i.target, + } + o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) + copy(o.ImpersonationConfig.Delegates, i.delegates) +} + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} + +// WithAuthCredentials returns a ClientOption that specifies an +// [cloud.google.com/go/auth.Credentials] to be used as the basis for +// authentication. +func WithAuthCredentials(creds *auth.Credentials) ClientOption { + return withAuthCredentials{creds} +} + +type withAuthCredentials struct{ creds *auth.Credentials } + +func (w withAuthCredentials) Apply(o *internal.DialSettings) { + o.AuthCredentials = w.creds +} + +// WithUniverseDomain returns a ClientOption that sets the universe domain. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func WithUniverseDomain(ud string) ClientOption { + return withUniverseDomain(ud) +} + +type withUniverseDomain string + +func (w withUniverseDomain) Apply(o *internal.DialSettings) { + o.UniverseDomain = string(w) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 000000000..6cd48d597 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,6005 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "basePath": "/storage/v1/", + "baseUrl": "https://storage.googleapis.com/storage/v1/", + "batchPath": "batch/storage/v1", + "description": "Stores and retrieves potentially large, immutable data objects.", + "discoveryVersion": "v1", + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", + "location": "europe-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", + "location": "europe-west9" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", + "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west4.rep.googleapis.com/", + "location": "us-west4" + } + ], + "etag": "\"38303531353337323634333935393838333734\"", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": 
"https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "id": "storage:v1", + "kind": "discovery#restDescription", + "labels": [ + "labs" + ], + "mtlsRootUrl": "https://storage.mtls.googleapis.com/", + "name": "storage", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "alt": { + "default": "json", + "description": "Data format for the response.", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Upload protocol for media (e.g. \"media\", \"multipart\", \"resumable\").", + "location": "query", + "type": "string" + }, + "userIp": { + "description": "Deprecated. Please use quotaUser instead.", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "anywhereCaches": { + "methods": { + "disable": { + "description": "Disables an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.disable", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns the metadata of an Anywhere Cache instance.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.get", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates an Anywhere Cache instance.", + 
"httpMethod": "POST", + "id": "storage.anywhereCaches.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items to return in a single page of responses. Maximum 1000.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches", + "response": { + "$ref": "AnywhereCaches" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "pause": { + "description": "Pauses an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.pause", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "resume": { + "description": "Resumes a paused or disabled Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.resume", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", + "httpMethod": "PATCH", + "id": "storage.anywhereCaches.update", + "parameterOrder": [ + "bucket", + 
"anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the parent bucket.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "bucketAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.bucketAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.bucketAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.bucketAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.bucketAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "description": "Deletes an empty bucket. 
Deletions are permanent unless soft delete is enabled on the bucket.", + "httpMethod": "DELETE", + "id": "storage.buckets.delete", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.get", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, specifies the generation of the bucket. This is required if softDeleted is true.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "softDeleted": { + "description": "If true, return the soft-deleted version of this bucket. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.getIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "getStorageLayout": { + "description": "Returns the storage layout configuration for the specified bucket. Note that this operation requires storage.objects.list permission.", + "httpMethod": "GET", + "id": "storage.buckets.getStorageLayout", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "prefix": { + "description": "An optional prefix used for permission check. It is useful when the caller only has storage.objects.list permission under a specific prefix.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/storageLayout", + "response": { + "$ref": "BucketStorageLayout" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new bucket.", + "httpMethod": "POST", + "id": "storage.buckets.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "enableObjectRetention": { + "default": "false", + "description": "When set to true, object retention is enabled for this bucket.", + "location": "query", + "type": "boolean" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of buckets for a given project.", + "httpMethod": "GET", + "id": "storage.buckets.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "maxResults": { + "default": "1000", + "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "softDeleted": { + "description": "If true, only soft-deleted bucket versions will be returned. The default is false. 
For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "lockRetentionPolicy": { + "description": "Locks retention policy on a bucket.", + "httpMethod": "POST", + "id": "storage.buckets.lockRetentionPolicy", + "parameterOrder": [ + "bucket", + "ifMetagenerationMatch" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/lockRetentionPolicy", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "httpMethod": "PATCH", + "id": "storage.buckets.patch", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "restore": { + "description": "Restores a soft-deleted bucket.", + "httpMethod": "POST", + "id": "storage.buckets.restore", + "parameterOrder": [ + "bucket", + "generation" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "Generation of a bucket.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/restore", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified bucket.", + "httpMethod": "PUT", + "id": "storage.buckets.setIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.buckets.testIamPermissions", + "parameterOrder": [ + "bucket", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "httpMethod": "PUT", + "id": "storage.buckets.update", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "description": "Stop watching resources through this channel", + "httpMethod": "POST", + "id": "storage.channels.stop", + "path": "channels/stop", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.defaultObjectAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new default object ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.defaultObjectAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves default object ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches a default object ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.defaultObjectAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates a default object ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.defaultObjectAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "folders": { + "methods": { + "delete": { + "description": "Permanently deletes a folder. 
Only applicable to buckets with hierarchical namespace enabled.", + "httpMethod": "DELETE", + "id": "storage.folders.delete", + "parameterOrder": [ + "bucket", + "folder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the folder resides.", + "location": "path", + "required": true, + "type": "string" + }, + "folder": { + "description": "Name of a folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the folder if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the folder if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/folders/{folder}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata for the specified folder. Only applicable to buckets with hierarchical namespace enabled.", + "httpMethod": "GET", + "id": "storage.folders.get", + "parameterOrder": [ + "bucket", + "folder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the folder resides.", + "location": "path", + "required": true, + "type": "string" + }, + "folder": { + "description": "Name of a folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/folders/{folder}", + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.", + "httpMethod": "POST", + "id": "storage.folders.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the folder resides.", + "location": "path", + "required": true, + "type": "string" + }, + "recursive": { + "description": "If true, any parent folder which doesn’t exist will be created automatically.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/folders", + "request": { + "$ref": "Folder" + }, + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of folders matching the criteria. 
Only applicable to buckets with hierarchical namespace enabled.", + "httpMethod": "GET", + "id": "storage.folders.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for folders.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.", + "location": "query", + "type": "string" + }, + "endOffset": { + "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items to return in a single page of responses.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.", + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/folders", + "response": { + "$ref": "Folders" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "rename": { + "description": "Renames a source folder to a destination folder. 
Only applicable to buckets with hierarchical namespace enabled.", + "httpMethod": "POST", + "id": "storage.folders.rename", + "parameterOrder": [ + "bucket", + "sourceFolder", + "destinationFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the folders are in.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationFolder": { + "description": "Name of the destination folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceFolder": { + "description": "Name of the source folder.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "managedFolders": { + "methods": { + "delete": { + "description": "Permanently deletes a managed folder.", + "httpMethod": "DELETE", + "id": "storage.managedFolders.delete", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "allowNonEmpty": { + "description": "Allows the deletion of a managed folder even if it is not empty. A managed folder is empty if there are no objects or managed folders that it applies to. 
Callers must have storage.managedFolders.setIamPolicy permission.", + "location": "query", + "type": "boolean" + }, + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the managed folder if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the managed folder if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata of the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.get", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.getIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new managed folder.", + "httpMethod": "POST", + "id": "storage.managedFolders.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "request": { + "$ref": "ManagedFolder" + }, + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists managed folders in the given bucket.", + "httpMethod": "GET", + "id": "storage.managedFolders.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items to return in a single page of responses.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "The managed folder name/path prefix to filter the output list of results.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "response": { + "$ref": "ManagedFolders" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified managed folder.", + "httpMethod": "PUT", + "id": "storage.managedFolders.setIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.managedFolders.testIamPermissions", + "parameterOrder": [ + "bucket", + "managedFolder", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "notifications": { + "methods": { + "delete": { + "description": "Permanently deletes a notification subscription.", + "httpMethod": "DELETE", + "id": "storage.notifications.delete", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "ID of the notification to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "View a notification configuration.", + "httpMethod": "GET", + "id": "storage.notifications.get", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "Notification ID", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a notification subscription for a given bucket.", + "httpMethod": "POST", + "id": "storage.notifications.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "request": { + "$ref": "Notification" + }, + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of notification subscriptions for a given bucket.", + "httpMethod": "GET", + "id": "storage.notifications.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a Google Cloud Storage bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "response": { + "$ref": "Notifications" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "httpMethod": "DELETE", + "id": "storage.objectAccessControls.delete", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.get", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified object.", + "httpMethod": "POST", + "id": "storage.objectAccessControls.insert", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.list", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified object.", + "httpMethod": "PATCH", + "id": "storage.objectAccessControls.patch", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified object.", + "httpMethod": "PUT", + "id": "storage.objectAccessControls.update", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "bulkRestore": { + "description": "Initiates a long-running bulk restore operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.objects.bulkRestore", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/o/bulkRestore", + "request": { + "$ref": "BulkRestoreObjectsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "compose": { + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "httpMethod": "POST", + "id": "storage.objects.compose", + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "copy": { + "description": "Copies a source object to a destination object. Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.copy", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." 
+ ], + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "delete": { + "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "httpMethod": "DELETE", + "id": "storage.objects.delete", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an object or its metadata.", + "httpMethod": "GET", + "id": "storage.objects.get", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified object.", + "httpMethod": "GET", + "id": "storage.objects.getIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Stores a new object and metadata.", + "httpMethod": "POST", + "id": "storage.objects.insert", + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + }, + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + } + } + }, + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "contentEncoding": { + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaUpload": true + }, + "list": { + "description": "Retrieves a list of objects matching the criteria.", + "httpMethod": "GET", + "id": "storage.objects.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. 
Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeFoldersAsPrefixes": { + "description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + "location": "query", + "type": "boolean" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "matchGlob": { + "description": "Filter results to objects and prefixes that match this glob pattern.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o", + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "description": "Patches an object's metadata.", + "httpMethod": "PATCH", + "id": "storage.objects.patch", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." 
+ ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request, for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "restore": { + "description": "Restores a soft-deleted object.", + "httpMethod": "POST", + "id": "storage.objects.restore", + "parameterOrder": [ + "bucket", + "object", + "generation" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "location": "query", + "type": "boolean" + }, + "generation": { + "description": "Selects a specific revision of this object.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "restoreToken": { + "description": "Restore token used to differentiate sof-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. 
This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/restore", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "rewrite": { + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.rewrite", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "maxBytesRewrittenPerCall": { + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "rewriteToken": { + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified object.", + "httpMethod": "PUT", + "id": "storage.objects.setIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.objects.testIamPermissions", + "parameterOrder": [ + "bucket", + "object", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates an object's metadata.", + "httpMethod": "PUT", + "id": "storage.objects.update", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. 
Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "watchAll": { + "description": "Watch for changes on all objects in a bucket.", + "httpMethod": "POST", + "id": "storage.objects.watchAll", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.cancel", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/cancel", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation.", + "httpMethod": "GET", + "id": "storage.buckets.operations.get", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request.", + "httpMethod": "GET", + "id": "storage.buckets.operations.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for operations.", + "location": "path", + "required": true, + "type": "string" + }, + "filter": { + "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. 
The service uses this parameter or 100 items, whichever is smaller.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/operations", + "response": { + "$ref": "GoogleLongrunningListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "projects": { + "resources": { + "hmacKeys": { + "methods": { + "create": { + "description": "Creates a new HMAC key for the specified service account.", + "httpMethod": "POST", + "id": "storage.projects.hmacKeys.create", + "parameterOrder": [ + "projectId", + "serviceAccountEmail" + ], + "parameters": { + "projectId": { + "description": "Project ID owning the service account.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "Email address of the service account.", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "delete": { + "description": "Deletes an HMAC key.", + "httpMethod": "DELETE", + "id": "storage.projects.hmacKeys.delete", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key to be deleted.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the requested key", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an HMAC key's metadata", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.get", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the requested key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + 
"list": { + "description": "Retrieves a list of HMAC keys matching the criteria.", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.list", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "maxResults": { + "default": "250", + "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Name of the project in which to look for HMAC keys.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "If present, only keys for the given service account are returned.", + "location": "query", + "type": "string" + }, + "showDeletedKeys": { + "description": "Whether or not to show keys in the DELETED state.", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKeysMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + "update": { + "description": "Updates the state of an HMAC key. 
See the [HMAC Key resource descriptor](https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) for valid states.", + "httpMethod": "PUT", + "id": "storage.projects.hmacKeys.update", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key being updated.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the updated key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "request": { + "$ref": "HmacKeyMetadata" + }, + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "serviceAccount": { + "methods": { + "get": { + "description": "Get the email address of this project's Google Cloud Storage service account.", + "httpMethod": "GET", + "id": "storage.projects.serviceAccount.get", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "projectId": { + "description": "Project ID", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/serviceAccount", + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + } + } + } + }, + "revision": "20240916", + "rootUrl": "https://storage.googleapis.com/", + "schemas": { + "AnywhereCache": { + "description": "An Anywhere Cache instance.", + "id": "AnywhereCache", + "properties": { + "admissionPolicy": { + "description": "The cache-level entry admission policy.", + "type": "string" + }, + "anywhereCacheId": { + "description": "The ID of the Anywhere cache instance.", + "type": "string" + }, + "bucket": { + "description": "The name of the bucket containing this cache instance.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the cache instance in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the resource, including the project number, bucket name and anywhere cache ID.", + "type": "string" + }, + "kind": { + "default": "storage#anywhereCache", + "description": "The kind of item this is. For Anywhere Cache, this is always storage#anywhereCache.", + "type": "string" + }, + "pendingUpdate": { + "description": "True if the cache instance has an active Update long-running operation.", + "type": "boolean" + }, + "selfLink": { + "description": "The link to this cache instance.", + "type": "string" + }, + "state": { + "description": "The current state of the cache instance.", + "type": "string" + }, + "ttl": { + "description": "The TTL of all cache entries in whole seconds. e.g., \"7200s\". 
", + "format": "google-duration", + "type": "string" + }, + "updateTime": { + "description": "The modification time of the cache instance metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "zone": { + "description": "The zone in which the cache instance is running. For example, us-central1-a.", + "type": "string" + } + }, + "type": "object" + }, + "AnywhereCaches": { + "description": "A list of Anywhere Caches.", + "id": "AnywhereCaches", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "AnywhereCache" + }, + "type": "array" + }, + "kind": { + "default": "storage#anywhereCaches", + "description": "The kind of item this is. For lists of Anywhere Caches, this is always storage#anywhereCaches.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Bucket": { + "description": "A bucket.", + "id": "Bucket", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.buckets.update" + ] + }, + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "autoclass": { + "description": "The bucket's Autoclass configuration.", + "properties": { + "enabled": { + "description": "Whether or not Autoclass is enabled on this bucket", + "type": "boolean" + }, + "terminalStorageClass": { + "description": "The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.", + "type": "string" + }, + "terminalStorageClassUpdateTime": { + "description": "A date and time in RFC 3339 format representing the time of the most recent update to \"terminalStorageClass\".", + "format": "date-time", + "type": "string" + }, + "toggleTime": { + "description": "A date and time in RFC 3339 format representing the instant at which \"enabled\" was last toggled.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "billing": { + "description": "The bucket's billing configuration.", + "properties": { + "requesterPays": { + "description": "When set to true, Requester Pays is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "cors": { + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "properties": { + "maxAgeSeconds": { + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32", + "type": "integer" + }, + "method": { + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "origin": { + "description": "The list of Origins eligible to receive CORS response headers. 
Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "responseHeader": { + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "customPlacementConfig": { + "description": "The bucket's custom placement configuration for Custom Dual Regions.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "defaultEventBasedHold": { + "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.", + "type": "boolean" + }, + "defaultObjectAcl": { + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "encryption": { + "description": "Encryption configuration for a bucket.", + "properties": { + "defaultKmsKeyName": { + "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the bucket.", + "type": "string" + }, + "generation": { + "description": "The generation of this bucket.", + "format": "int64", + "type": "string" + }, + "hardDeleteTime": { + "description": "The hard delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "hierarchicalNamespace": { + "description": "The bucket's hierarchical namespace configuration.", + "properties": { + "enabled": { + "description": "When set to true, hierarchical namespace is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "iamConfiguration": { + "description": "The bucket's IAM configuration.", + "properties": { + "bucketPolicyOnly": { + "description": "The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. 
We recommend using the uniformBucketLevelAccess field to enable and disable the feature.", + "properties": { + "enabled": { + "description": "If set, access is controlled only by bucket-level or above IAM policies.", + "type": "boolean" + }, + "lockedTime": { + "description": "The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "publicAccessPrevention": { + "description": "The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.", + "type": "string" + }, + "uniformBucketLevelAccess": { + "description": "The bucket's uniform bucket-level access configuration.", + "properties": { + "enabled": { + "description": "If set, access is controlled only by bucket-level or above IAM policies.", + "type": "boolean" + }, + "lockedTime": { + "description": "The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the bucket. For buckets, the id and name properties are the same.", + "type": "string" + }, + "ipFilter": { + "description": "The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'.", + "properties": { + "mode": { + "description": "The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'.", + "type": "string" + }, + "publicNetworkSource": { + "description": "The public network source of the bucket's IP filter.", + "properties": { + "allowedIpCidrRanges": { + "description": "The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "vpcNetworkSources": { + "description": "The list of [VPC network](https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter.", + "items": { + "properties": { + "allowedIpCidrRanges": { + "description": "The list of IPv4, IPv6 cidr ranges subnetworks that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + }, + "network": { + "description": "Name of the network. Format: projects/{PROJECT_ID}/global/networks/{NETWORK_NAME}", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "kind": { + "default": "storage#bucket", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "description": "An individual label entry.", + "type": "string" + }, + "description": "User-provided labels, in key/value pairs.", + "type": "object" + }, + "lifecycle": { + "description": "The bucket's lifecycle configuration. 
See [Lifecycle Management](https://cloud.google.com/storage/docs/lifecycle) for more information.", + "properties": { + "rule": { + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "properties": { + "action": { + "description": "The action to take.", + "properties": { + "storageClass": { + "description": "Target storage class. Required iff the type of the action is SetStorageClass.", + "type": "string" + }, + "type": { + "description": "Type of the action. Currently, only Delete, SetStorageClass, and AbortIncompleteMultipartUpload are supported.", + "type": "string" + } + }, + "type": "object" + }, + "condition": { + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", + "format": "int32", + "type": "integer" + }, + "createdBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date", + "type": "string" + }, + "customTimeBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the custom time on an object is before this date in UTC.", + "format": "date", + "type": "string" + }, + "daysSinceCustomTime": { + "description": "Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. If no custom timestamp is specified on an object, the condition does not apply.", + "format": "int32", + "type": "integer" + }, + "daysSinceNoncurrentTime": { + "description": "Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.", + "format": "int32", + "type": "integer" + }, + "isLive": { + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", + "type": "boolean" + }, + "matchesPattern": { + "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.", + "type": "string" + }, + "matchesPrefix": { + "description": "List of object name prefixes. This condition will be satisfied when at least one of the prefixes exactly matches the beginning of the object name.", + "items": { + "type": "string" + }, + "type": "array" + }, + "matchesStorageClass": { + "description": "Objects having any of the storage classes specified by this condition will be matched. 
Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "items": { + "type": "string" + }, + "type": "array" + }, + "matchesSuffix": { + "description": "List of object name suffixes. This condition will be satisfied when at least one of the suffixes exactly matches the end of the object name.", + "items": { + "type": "string" + }, + "type": "array" + }, + "noncurrentTimeBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.", + "format": "date", + "type": "string" + }, + "numNewerVersions": { + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "location": { + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the [Developer's Guide](https://cloud.google.com/storage/docs/locations) for the authoritative list.", + "type": "string" + }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + }, + "logging": { + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "description": "The destination bucket where the current bucket's logs should be placed.", + "type": "string" + }, + "logObjectPrefix": { + "description": "A prefix for log object names.", + "type": "string" + } + }, + "type": "object" + }, + "metageneration": { + "description": "The metadata generation of this bucket.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.buckets.insert" + ] + }, + "description": "The name of the bucket.", + "type": "string" + }, + "objectRetention": { + "description": "The bucket's object retention config.", + "properties": { + "mode": { + "description": "The bucket's object retention mode. Can be Enabled.", + "type": "string" + } + }, + "type": "object" + }, + "owner": { + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "description": "The entity, in the form project-owner-projectId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "projectNumber": { + "description": "The project number of the project the bucket belongs to.", + "format": "uint64", + "type": "string" + }, + "retentionPolicy": { + "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. 
Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "isLocked": { + "description": "Once locked, an object retention policy cannot be modified.", + "type": "boolean" + }, + "retentionPeriod": { + "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "rpo": { + "description": "The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.", + "type": "string" + }, + "satisfiesPZI": { + "description": "Reserved for future use.", + "type": "boolean" + }, + "satisfiesPZS": { + "description": "Reserved for future use.", + "type": "boolean" + }, + "selfLink": { + "description": "The URI of this bucket.", + "type": "string" + }, + "softDeletePolicy": { + "description": "The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "retentionDurationSeconds": { + "description": "The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "softDeleteTime": { + "description": "The soft delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "storageClass": { + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "versioning": { + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "description": "While set to true, versioning is fully enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "website": { + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. 
See the [Static Website Examples](https://cloud.google.com/storage/docs/static-website) for more information.", + "properties": { + "mainPageSuffix": { + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", + "type": "string" + }, + "notFoundPage": { + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "BucketAccessControl": { + "description": "An access-control entry.", + "id": "BucketAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#bucketAccessControl", + "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "BucketAccessControls": { + "description": "An access-control list.", + "id": "BucketAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#bucketAccessControls", + "description": "The kind of item this is. 
For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "BucketStorageLayout": { + "description": "The storage layout configuration of a bucket.", + "id": "BucketStorageLayout", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "customPlacementConfig": { + "description": "The bucket's custom placement configuration for Custom Dual Regions.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "hierarchicalNamespace": { + "description": "The bucket's hierarchical namespace configuration.", + "properties": { + "enabled": { + "description": "When set to true, hierarchical namespace is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "kind": { + "default": "storage#storageLayout", + "description": "The kind of item this is. For storage layout, this is always storage#storageLayout.", + "type": "string" + }, + "location": { + "description": "The location of the bucket.", + "type": "string" + }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + } + }, + "type": "object" + }, + "Buckets": { + "description": "A list of buckets.", + "id": "Buckets", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Bucket" + }, + "type": "array" + }, + "kind": { + "default": "storage#buckets", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "BulkRestoreObjectsRequest": { + "description": "A bulk restore objects request.", + "id": "BulkRestoreObjectsRequest", + "properties": { + "allowOverwrite": { + "description": "If false (default), the restore will not overwrite live objects with the same name at the destination. This means some deleted objects may be skipped. If true, live objects will be overwritten resulting in a noncurrent object (if versioning is enabled). If versioning is not enabled, overwriting the object will result in a soft-deleted object. In either case, if a noncurrent object already exists with the same name, a live version can be written without issue.", + "type": "boolean" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "type": "boolean" + }, + "matchGlobs": { + "description": "Restores only the objects matching any of the specified glob(s). 
If this parameter is not specified, all objects will be restored within the specified time range.", + "items": { + "type": "string" + }, + "type": "array" + }, + "softDeletedAfterTime": { + "description": "Restores only the objects that were soft-deleted after this time.", + "format": "date-time", + "type": "string" + }, + "softDeletedBeforeTime": { + "description": "Restores only the objects that were soft-deleted before this time.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "Channel": { + "description": "An notification channel used to watch for resource changes.", + "id": "Channel", + "properties": { + "address": { + "description": "The address where notifications are delivered for this channel.", + "type": "string" + }, + "expiration": { + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "A UUID or similar unique string that identifies this channel.", + "type": "string" + }, + "kind": { + "default": "api#channel", + "description": "Identifies this as a notification channel used to watch for changes to a resource, which is \"api#channel\".", + "type": "string" + }, + "params": { + "additionalProperties": { + "description": "Declares a new parameter by name.", + "type": "string" + }, + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "type": "object" + }, + "payload": { + "description": "A Boolean value to indicate whether payload is wanted. Optional.", + "type": "boolean" + }, + "resourceId": { + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.", + "type": "string" + }, + "resourceUri": { + "description": "A version-specific identifier for the watched resource.", + "type": "string" + }, + "token": { + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.", + "type": "string" + }, + "type": { + "description": "The type of delivery mechanism used for this channel.", + "type": "string" + } + }, + "type": "object" + }, + "ComposeRequest": { + "description": "A Compose request.", + "id": "ComposeRequest", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "default": "storage#composeRequest", + "description": "The kind of item this is.", + "type": "string" + }, + "sourceObjects": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "properties": { + "generation": { + "description": "The generation of this object to use as the source.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The source object's name. All source objects must reside in the same bucket.", + "type": "string" + }, + "objectPreconditions": { + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "description": "Only perform the composition if the generation of the source object that would be used matches this value. 
If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "Expr": { + "description": "Represents an expression text. Example: title: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", + "id": "Expr", + "properties": { + "description": { + "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" + }, + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.", + "type": "string" + }, + "location": { + "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "Folder": { + "description": "A folder. Only available in buckets with hierarchical namespace enabled.", + "id": "Folder", + "properties": { + "bucket": { + "description": "The name of the bucket containing this folder.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the folder in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the folder, including the bucket name, folder name.", + "type": "string" + }, + "kind": { + "default": "storage#folder", + "description": "The kind of item this is. For folders, this is always storage#folder.", + "type": "string" + }, + "metageneration": { + "description": "The version of the metadata for this folder. Used for preconditions and for detecting changes in metadata.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the folder. Required if not specified by URL parameter.", + "type": "string" + }, + "pendingRenameInfo": { + "description": "Only present if the folder is part of an ongoing rename folder operation. Contains information which can be used to query the operation status.", + "properties": { + "operationId": { + "description": "The ID of the rename folder operation.", + "type": "string" + } + }, + "type": "object" + }, + "selfLink": { + "description": "The link to this folder.", + "type": "string" + }, + "updateTime": { + "description": "The modification time of the folder metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "Folders": { + "description": "A list of folders.", + "id": "Folders", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Folder" + }, + "type": "array" + }, + "kind": { + "default": "storage#folders", + "description": "The kind of item this is. For lists of folders, this is always storage#folders.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. 
Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for storage.buckets.operations.list.", + "id": "GoogleLongrunningListOperationsResponse", + "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "GoogleLongrunningOperation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", + "properties": { + "done": { + "description": "If the value is \"false\", it means the operation is still in progress. If \"true\", the operation is completed, and either \"error\" or \"response\" is available.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the \"name\" should be a resource name ending with \"operations/{operationId}\".", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", + "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The \"Status\" type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each \"Status\" message contains three pieces of data: error code, error message, and error details. 
You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKey": { + "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", + "id": "HmacKey", + "properties": { + "kind": { + "default": "storage#hmacKey", + "description": "The kind of item this is. For HMAC keys, this is always storage#hmacKey.", + "type": "string" + }, + "metadata": { + "$ref": "HmacKeyMetadata", + "description": "Key metadata." + }, + "secret": { + "description": "HMAC secret key material.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeyMetadata": { + "description": "JSON template to produce a JSON-style HMAC Key metadata resource.", + "id": "HmacKeyMetadata", + "properties": { + "accessId": { + "description": "The ID of the HMAC Key.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the HMAC key.", + "type": "string" + }, + "id": { + "description": "The ID of the HMAC key, including the Project ID and the Access ID.", + "type": "string" + }, + "kind": { + "default": "storage#hmacKeyMetadata", + "description": "The kind of item this is. For HMAC Key metadata, this is always storage#hmacKeyMetadata.", + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account to which the key authenticates.", + "type": "string" + }, + "selfLink": { + "description": "The link to this resource.", + "type": "string" + }, + "serviceAccountEmail": { + "description": "The email address of the key's associated service account.", + "type": "string" + }, + "state": { + "description": "The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED.", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the HMAC key in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The last modification time of the HMAC key metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeysMetadata": { + "description": "A list of hmacKeys.", + "id": "HmacKeysMetadata", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "HmacKeyMetadata" + }, + "type": "array" + }, + "kind": { + "default": "storage#hmacKeysMetadata", + "description": "The kind of item this is. For lists of hmacKeys, this is always storage#hmacKeysMetadata.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. 
Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "ManagedFolder": { + "description": "A managed folder.", + "id": "ManagedFolder", + "properties": { + "bucket": { + "description": "The name of the bucket containing this managed folder.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the managed folder in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the managed folder, including the bucket name and managed folder name.", + "type": "string" + }, + "kind": { + "default": "storage#managedFolder", + "description": "The kind of item this is. For managed folders, this is always storage#managedFolder.", + "type": "string" + }, + "metageneration": { + "description": "The version of the metadata for this managed folder. Used for preconditions and for detecting changes in metadata.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the managed folder. Required if not specified by URL parameter.", + "type": "string" + }, + "selfLink": { + "description": "The link to this managed folder.", + "type": "string" + }, + "updateTime": { + "description": "The last update time of the managed folder metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ManagedFolders": { + "description": "A list of managed folders.", + "id": "ManagedFolders", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ManagedFolder" + }, + "type": "array" + }, + "kind": { + "default": "storage#managedFolders", + "description": "The kind of item this is. For lists of managed folders, this is always storage#managedFolders.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Notification": { + "description": "A subscription to receive Google PubSub notifications.", + "id": "Notification", + "properties": { + "custom_attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for this subscription notification.", + "type": "string" + }, + "event_types": { + "description": "If present, only send notifications about listed event types. If empty, sent notifications for all event types.", + "items": { + "type": "string" + }, + "type": "array" + }, + "id": { + "description": "The ID of the notification.", + "type": "string" + }, + "kind": { + "default": "storage#notification", + "description": "The kind of item this is. 
For notifications, this is always storage#notification.", + "type": "string" + }, + "object_name_prefix": { + "description": "If present, only apply this notification configuration to object names that begin with this prefix.", + "type": "string" + }, + "payload_format": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "default": "JSON_API_V1", + "description": "The desired content of the Payload.", + "type": "string" + }, + "selfLink": { + "description": "The canonical URL of this notification.", + "type": "string" + }, + "topic": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", + "type": "string" + } + }, + "type": "object" + }, + "Notifications": { + "description": "A list of notification subscriptions.", + "id": "Notifications", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Notification" + }, + "type": "array" + }, + "kind": { + "default": "storage#notifications", + "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.", + "type": "string" + } + }, + "type": "object" + }, + "Object": { + "description": "An object.", + "id": "Object", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.objects.update" + ] + }, + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "bucket": { + "description": "The name of the bucket containing this object.", + "type": "string" + }, + "cacheControl": { + "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.", + "type": "string" + }, + "componentCount": { + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32", + "type": "integer" + }, + "contentDisposition": { + "description": "Content-Disposition of the object data.", + "type": "string" + }, + "contentEncoding": { + "description": "Content-Encoding of the object data.", + "type": "string" + }, + "contentLanguage": { + "description": "Content-Language of the object data.", + "type": "string" + }, + "contentType": { + "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.", + "type": "string" + }, + "crc32c": { + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. 
For more information about using the CRC32c checksum, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", + "type": "string" + }, + "customTime": { + "description": "A timestamp in RFC 3339 format specified by the user for an object.", + "format": "date-time", + "type": "string" + }, + "customerEncryption": { + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "description": "The encryption algorithm.", + "type": "string" + }, + "keySha256": { + "description": "SHA256 hash value of the encryption key.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the object.", + "type": "string" + }, + "eventBasedHold": { + "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.", + "type": "boolean" + }, + "generation": { + "description": "The content generation of this object. Used for object versioning.", + "format": "int64", + "type": "string" + }, + "hardDeleteTime": { + "description": "This is the time (in the future) when the soft-deleted object will no longer be restorable. It is equal to the soft delete time plus the current soft delete retention duration of the bucket.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the object, including the bucket name, object name, and generation number.", + "type": "string" + }, + "kind": { + "default": "storage#object", + "description": "The kind of item this is. For objects, this is always storage#object.", + "type": "string" + }, + "kmsKeyName": { + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", + "type": "string" + }, + "md5Hash": { + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", + "type": "string" + }, + "mediaLink": { + "description": "Media download link.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "An individual metadata entry.", + "type": "string" + }, + "description": "User-provided metadata, in key/value pairs.", + "type": "object" + }, + "metageneration": { + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the object. 
Required if not specified by URL parameter.", + "type": "string" + }, + "owner": { + "description": "The owner of the object. This will always be the uploader of the object.", + "properties": { + "entity": { + "description": "The entity, in the form user-userId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "restoreToken": { + "description": "Restore token used to differentiate deleted objects with the same name and generation. This field is only returned for deleted objects in hierarchical namespace buckets.", + "type": "string" + }, + "retention": { + "description": "A collection of object level retention parameters.", + "properties": { + "mode": { + "description": "The bucket's object retention mode, can only be Unlocked or Locked.", + "type": "string" + }, + "retainUntilTime": { + "description": "A time in RFC 3339 format until which object retention protects this object.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "retentionExpirationTime": { + "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", + "format": "date-time", + "type": "string" + }, + "selfLink": { + "description": "The link to this object.", + "type": "string" + }, + "size": { + "description": "Content-Length of the data in bytes.", + "format": "uint64", + "type": "string" + }, + "softDeleteTime": { + "description": "The time at which the object became soft-deleted in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "storageClass": { + "description": "Storage class of the object.", + "type": "string" + }, + "temporaryHold": { + "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.", + "type": "boolean" + }, + "timeCreated": { + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "timeDeleted": { + "description": "The time at which the object became noncurrent in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time", + "type": "string" + }, + "timeStorageClassUpdated": { + "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the object metadata in RFC 3339 format. Set initially to object creation time and then updated whenever any metadata of the object changes. 
This includes changes made by a requester, such as modifying custom metadata, as well as changes made by Cloud Storage on behalf of a requester, such as changing the storage class based on an Object Lifecycle Configuration.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControl": { + "description": "An access-control entry.", + "id": "ObjectAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "generation": { + "description": "The content generation of the object, if applied to an object.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#objectAccessControl", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "type": "string" + }, + "object": { + "description": "The name of the object, if applied to an object.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControls": { + "description": "An access-control list.", + "id": "ObjectAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#objectAccessControls", + "description": "The kind of item this is. 
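The entity forms listed above (user-email, group-email, domain-domain, allUsers, and so on) are passed through verbatim by the generated ObjectAccessControls service. A small sketch, with a hypothetical helper name and placeholder bucket/object values, of granting READER on one object; this only applies to buckets that still use ACLs rather than uniform bucket-level access:

```go
package snippets

import (
	storage "google.golang.org/api/storage/v1"
)

// grantObjectReader adds an ACL entry (for example entity "user-liz@example.com")
// with the READER role to a single object.
func grantObjectReader(svc *storage.Service, bucket, object, entity string) error {
	acl := &storage.ObjectAccessControl{
		Entity: entity, // e.g. "user-liz@example.com" or "domain-example.com"
		Role:   "READER",
	}
	_, err := svc.ObjectAccessControls.Insert(bucket, object, acl).Do()
	return err
}
```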
For lists of object access control entries, this is always storage#objectAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "Objects": { + "description": "A list of objects.", + "id": "Objects", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Object" + }, + "type": "array" + }, + "kind": { + "default": "storage#objects", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "prefixes": { + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Policy": { + "description": "A bucket/object/managedFolder IAM policy.", + "id": "Policy", + "properties": { + "bindings": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" + ] + }, + "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", + "items": { + "properties": { + "condition": { + "$ref": "Expr", + "description": "The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." + }, + "members": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" + ] + }, + "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" + ] + }, + "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. 
All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the policy.", + "format": "byte", + "type": "string" + }, + "kind": { + "default": "storage#policy", + "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", + "type": "string" + }, + "resourceId": { + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, projects/_/buckets/bucket/objects/object for objects, and projects/_/buckets/bucket/managedFolders/managedFolder. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "type": "string" + }, + "version": { + "description": "The IAM policy format version.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "RewriteResponse": { + "description": "A rewrite response.", + "id": "RewriteResponse", + "properties": { + "done": { + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.", + "type": "boolean" + }, + "kind": { + "default": "storage#rewriteResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "objectSize": { + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "int64", + "type": "string" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.", + "type": "string" + }, + "totalBytesRewritten": { + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. 
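The done, rewriteToken, and totalBytesRewritten fields of RewriteResponse (described here and continued below) drive the usual server-side copy loop: re-issue the rewrite with the returned token until done is true. A sketch under that assumption, using a hypothetical helper name and an empty destination Object body:

```go
package snippets

import (
	"log"

	storage "google.golang.org/api/storage/v1"
)

// copyObject rewrites src to dst, resuming with the rewrite token until the
// service reports the copy as done.
func copyObject(svc *storage.Service, srcBucket, srcObject, dstBucket, dstObject string) error {
	token := ""
	for {
		resp, err := svc.Objects.Rewrite(srcBucket, srcObject, dstBucket, dstObject, &storage.Object{}).
			RewriteToken(token).Do()
		if err != nil {
			return err
		}
		log.Printf("rewritten %d of %d bytes", resp.TotalBytesRewritten, resp.ObjectSize)
		if resp.Done {
			return nil
		}
		token = resp.RewriteToken
	}
}
```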
This property is always present in the response.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ServiceAccount": { + "description": "A subscription to receive Google PubSub notifications.", + "id": "ServiceAccount", + "properties": { + "email_address": { + "description": "The ID of the notification.", + "type": "string" + }, + "kind": { + "default": "storage#serviceAccount", + "description": "The kind of item this is. For notifications, this is always storage#notification.", + "type": "string" + } + }, + "type": "object" + }, + "TestIamPermissionsResponse": { + "description": "A storage.(buckets|objects|managedFolders).testIamPermissions response.", + "id": "TestIamPermissionsResponse", + "properties": { + "kind": { + "default": "storage#testIamPermissionsResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "permissions": { + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets, objects, or managedFolders. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata. \n- storage.managedFolders.delete — Delete managed folder. \n- storage.managedFolders.get — Read managed folder metadata. \n- storage.managedFolders.getIamPolicy — Read managed folder IAM policy. \n- storage.managedFolders.create — Create managed folder. \n- storage.managedFolders.list — List managed folders. \n- storage.managedFolders.setIamPolicy — Update managed folder IAM policy.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "servicePath": "storage/v1/", + "title": "Cloud Storage JSON API", + "version": "v1" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 000000000..c7c932176 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,13148 @@ +// Copyright 2024 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package storage provides access to the Cloud Storage JSON API. +// +// This package is DEPRECATED. Use package cloud.google.com/go/storage instead. +// +// For product documentation, see: https://developers.google.com/storage/docs/json_api/ +// +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. 
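The package comment above (continuing below) deprecates this generated client in favor of cloud.google.com/go/storage, which this patch also vendors. For comparison, a minimal sketch of the equivalent read path with that library, using placeholder bucket and object names:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder bucket and object names.
	r, err := client.Bucket("my-bucket").Object("path/to/object.txt").NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```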
+// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// +// # Creating a client +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// # Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: +// +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: +// +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See [google.golang.org/api/option.ClientOption] for details on options. +package storage // import "google.golang.org/api/storage/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/googleapis/gax-go/v2" + googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version +var _ = gax.Version + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://storage.googleapis.com/storage/v1/" +const basePathTemplate = "https://storage.UNIVERSE_DOMAIN/storage/v1/" +const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := internaloption.WithDefaultScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
+func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.AnywhereCaches = NewAnywhereCachesService(s) + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Folders = NewFoldersService(s) + s.ManagedFolders = NewManagedFoldersService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + AnywhereCaches *AnywhereCachesService + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + Folders *FoldersService + + ManagedFolders *ManagedFoldersService + + Notifications *NotificationsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService + + Operations *OperationsService + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAnywhereCachesService(s *Service) *AnywhereCachesService { + rs := &AnywhereCachesService{s: s} + return rs +} + +type AnywhereCachesService struct { + s *Service +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewFoldersService(s *Service) *FoldersService { + rs := &FoldersService{s: s} + return rs +} + +type FoldersService struct { + s *Service +} + +func NewManagedFoldersService(s *Service) *ManagedFoldersService { + rs := &ManagedFoldersService{s: s} + return rs +} + +type ManagedFoldersService struct { + s *Service +} + +func NewNotificationsService(s *Service) *NotificationsService { + rs := &NotificationsService{s: s} + return rs +} + +type NotificationsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +func NewOperationsService(s *Service) *OperationsService { + rs := &OperationsService{s: s} + return rs +} + +type OperationsService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + 
rs.HmacKeys = NewProjectsHmacKeysService(s) + rs.ServiceAccount = NewProjectsServiceAccountService(s) + return rs +} + +type ProjectsService struct { + s *Service + + HmacKeys *ProjectsHmacKeysService + + ServiceAccount *ProjectsServiceAccountService +} + +func NewProjectsHmacKeysService(s *Service) *ProjectsHmacKeysService { + rs := &ProjectsHmacKeysService{s: s} + return rs +} + +type ProjectsHmacKeysService struct { + s *Service +} + +func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { + rs := &ProjectsServiceAccountService{s: s} + return rs +} + +type ProjectsServiceAccountService struct { + s *Service +} + +// AnywhereCache: An Anywhere Cache instance. +type AnywhereCache struct { + // AdmissionPolicy: The cache-level entry admission policy. + AdmissionPolicy string `json:"admissionPolicy,omitempty"` + // AnywhereCacheId: The ID of the Anywhere cache instance. + AnywhereCacheId string `json:"anywhereCacheId,omitempty"` + // Bucket: The name of the bucket containing this cache instance. + Bucket string `json:"bucket,omitempty"` + // CreateTime: The creation time of the cache instance in RFC 3339 format. + CreateTime string `json:"createTime,omitempty"` + // Id: The ID of the resource, including the project number, bucket name and + // anywhere cache ID. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For Anywhere Cache, this is always + // storage#anywhereCache. + Kind string `json:"kind,omitempty"` + // PendingUpdate: True if the cache instance has an active Update long-running + // operation. + PendingUpdate bool `json:"pendingUpdate,omitempty"` + // SelfLink: The link to this cache instance. + SelfLink string `json:"selfLink,omitempty"` + // State: The current state of the cache instance. + State string `json:"state,omitempty"` + // Ttl: The TTL of all cache entries in whole seconds. e.g., "7200s". + Ttl string `json:"ttl,omitempty"` + // UpdateTime: The modification time of the cache instance metadata in RFC 3339 + // format. + UpdateTime string `json:"updateTime,omitempty"` + // Zone: The zone in which the cache instance is running. For example, + // us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "AdmissionPolicy") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AdmissionPolicy") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AnywhereCache) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCache + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// AnywhereCaches: A list of Anywhere Caches. +type AnywhereCaches struct { + // Items: The list of items. + Items []*AnywhereCache `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of Anywhere Caches, this is always + // storage#anywhereCaches. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. 
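Every list type in this package, like AnywhereCaches here and Objects above, carries a NextPageToken that is fed back as the pageToken of the next request until it comes back empty. A brief sketch of that loop for an object listing, with a hypothetical helper name and a placeholder bucket:

```go
package snippets

import (
	storage "google.golang.org/api/storage/v1"
)

// listAllObjects pages through every object in a bucket by passing each
// response's NextPageToken back as the next request's page token.
func listAllObjects(svc *storage.Service, bucket string) ([]*storage.Object, error) {
	var all []*storage.Object
	pageToken := ""
	for {
		res, err := svc.Objects.List(bucket).PageToken(pageToken).Do()
		if err != nil {
			return nil, err
		}
		all = append(all, res.Items...)
		if res.NextPageToken == "" {
			return all, nil
		}
		pageToken = res.NextPageToken
	}
}
```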
Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AnywhereCaches) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCaches + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + // Autoclass: The bucket's Autoclass configuration. + Autoclass *BucketAutoclass `json:"autoclass,omitempty"` + // Billing: The bucket's billing configuration. + Billing *BucketBilling `json:"billing,omitempty"` + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration. + Cors []*BucketCors `json:"cors,omitempty"` + // CustomPlacementConfig: The bucket's custom placement configuration for + // Custom Dual Regions. + CustomPlacementConfig *BucketCustomPlacementConfig `json:"customPlacementConfig,omitempty"` + // DefaultEventBasedHold: The default value for event-based hold on newly + // created objects in this bucket. Event-based hold is a way to retain objects + // indefinitely until an event occurs, signified by the hold's release. After + // being released, such objects will be subject to bucket-level retention (if + // any). One sample use case of this flag is for banks to hold loan documents + // for at least 3 years after loan is paid in full. Here, bucket-level + // retention is 3 years and the event is loan being paid in full. In this + // example, these objects will be held intact for any number of years until the + // event has occurred (event-based hold on the object is released) and then 3 + // more years after that. That means retention duration of the objects begins + // from the moment event-based hold transitioned from true to false. Objects + // under event-based hold cannot be deleted, overwritten or archived until the + // hold is removed. + DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` + // DefaultObjectAcl: Default access controls to apply to new objects when no + // ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + // Encryption: Encryption configuration for a bucket. + Encryption *BucketEncryption `json:"encryption,omitempty"` + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + // Generation: The generation of this bucket. + Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: The hard delete time of the bucket in RFC 3339 format. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` + // HierarchicalNamespace: The bucket's hierarchical namespace configuration. 
+ HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` + // IamConfiguration: The bucket's IAM configuration. + IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"` + // Id: The ID of the bucket. For buckets, the id and name properties are the + // same. + Id string `json:"id,omitempty"` + // IpFilter: The bucket's IP filter configuration. Specifies the network + // sources that are allowed to access the operations on the bucket, as well as + // its underlying objects. Only enforced when the mode is set to 'Enabled'. + IpFilter *BucketIpFilter `json:"ipFilter,omitempty"` + // Kind: The kind of item this is. For buckets, this is always storage#bucket. + Kind string `json:"kind,omitempty"` + // Labels: User-provided labels, in key/value pairs. + Labels map[string]string `json:"labels,omitempty"` + // Lifecycle: The bucket's lifecycle configuration. See Lifecycle Management + // (https://cloud.google.com/storage/docs/lifecycle) for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + // Location: The location of the bucket. Object data for objects in the bucket + // resides in physical storage within this region. Defaults to US. See the + // Developer's Guide (https://cloud.google.com/storage/docs/locations) for the + // authoritative list. + Location string `json:"location,omitempty"` + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + // Logging: The bucket's logging configuration, which defines the destination + // bucket and optional name prefix for the current bucket's logs. + Logging *BucketLogging `json:"logging,omitempty"` + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + // ObjectRetention: The bucket's object retention config. + ObjectRetention *BucketObjectRetention `json:"objectRetention,omitempty"` + // Owner: The owner of the bucket. This is always the project team's owner + // group. + Owner *BucketOwner `json:"owner,omitempty"` + // ProjectNumber: The project number of the project the bucket belongs to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + // RetentionPolicy: The bucket's retention policy. The retention policy + // enforces a minimum retention time for all objects contained in the bucket, + // based on their creation time. Any attempt to overwrite or delete objects + // younger than the retention period will result in a PERMISSION_DENIED error. + // An unlocked retention policy can be modified or removed from the bucket via + // a storage.buckets.update operation. A locked retention policy cannot be + // removed or shortened in duration for the lifetime of the bucket. Attempting + // to remove or decrease period of a locked retention policy will result in a + // PERMISSION_DENIED error. + RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"` + // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO + // to turn on Turbo Replication on a bucket. + Rpo string `json:"rpo,omitempty"` + // SatisfiesPZI: Reserved for future use. + SatisfiesPZI bool `json:"satisfiesPZI,omitempty"` + // SatisfiesPZS: Reserved for future use. + SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` + // SelfLink: The URI of this bucket. 
+ SelfLink string `json:"selfLink,omitempty"` + // SoftDeletePolicy: The bucket's soft delete policy, which defines the period + // of time that soft-deleted objects will be retained, and cannot be + // permanently deleted. + SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // SoftDeleteTime: The soft delete time of the bucket in RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` + // StorageClass: The bucket's default storage class, used whenever no + // storageClass is specified for a newly-created object. This defines how + // objects in the bucket are stored and determines the SLA and the cost of + // storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, + // COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not + // specified when the bucket is created, it will default to STANDARD. For more + // information, see Storage Classes + // (https://cloud.google.com/storage/docs/storage-classes). + StorageClass string `json:"storageClass,omitempty"` + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + // Website: The bucket's website configuration, controlling how the service + // behaves when accessing bucket contents as a web site. See the Static Website + // Examples (https://cloud.google.com/storage/docs/static-website) for more + // information. + Website *BucketWebsite `json:"website,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Acl") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Acl") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Bucket) MarshalJSON() ([]byte, error) { + type NoMethod Bucket + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketAutoclass: The bucket's Autoclass configuration. +type BucketAutoclass struct { + // Enabled: Whether or not Autoclass is enabled on this bucket + Enabled bool `json:"enabled,omitempty"` + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of time. + // Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string `json:"terminalStorageClass,omitempty"` + // TerminalStorageClassUpdateTime: A date and time in RFC 3339 format + // representing the time of the most recent update to "terminalStorageClass". + TerminalStorageClassUpdateTime string `json:"terminalStorageClassUpdateTime,omitempty"` + // ToggleTime: A date and time in RFC 3339 format representing the instant at + // which "enabled" was last toggled. + ToggleTime string `json:"toggleTime,omitempty"` + // ForceSendFields is a list of field names (e.g. 
"Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketAutoclass) MarshalJSON() ([]byte, error) { + type NoMethod BucketAutoclass + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketBilling: The bucket's billing configuration. +type BucketBilling struct { + // RequesterPays: When set to true, Requester Pays is enabled for this bucket. + RequesterPays bool `json:"requesterPays,omitempty"` + // ForceSendFields is a list of field names (e.g. "RequesterPays") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "RequesterPays") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketBilling) MarshalJSON() ([]byte, error) { + type NoMethod BucketBilling + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + // Method: The list of HTTP methods on which to include CORS response headers, + // (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and + // means "any method". + Method []string `json:"method,omitempty"` + // Origin: The list of Origins eligible to receive CORS response headers. Note: + // "*" is permitted in the list of origins, and means "any Origin". + Origin []string `json:"origin,omitempty"` + // ResponseHeader: The list of HTTP headers other than the simple response + // headers to give permission for the user-agent to share across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketCors) MarshalJSON() ([]byte, error) { + type NoMethod BucketCors + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketCustomPlacementConfig: The bucket's custom placement configuration for +// Custom Dual Regions. +type BucketCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod BucketCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketEncryption: Encryption configuration for a bucket. +type BucketEncryption struct { + // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt objects + // inserted into this bucket, if no encryption method is specified. + DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` + // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketEncryption) MarshalJSON() ([]byte, error) { + type NoMethod BucketEncryption + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketHierarchicalNamespace: The bucket's hierarchical namespace +// configuration. +type BucketHierarchicalNamespace struct { + // Enabled: When set to true, hierarchical namespace is enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) { + type NoMethod BucketHierarchicalNamespace + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIamConfiguration: The bucket's IAM configuration. +type BucketIamConfiguration struct { + // BucketPolicyOnly: The bucket's uniform bucket-level access configuration. + // The feature was formerly known as Bucket Policy Only. For backward + // compatibility, this field will be populated with identical information as + // the uniformBucketLevelAccess field. We recommend using the + // uniformBucketLevelAccess field to enable and disable the feature. + BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` + // PublicAccessPrevention: The bucket's Public Access Prevention configuration. + // Currently, 'inherited' and 'enforced' are supported. + PublicAccessPrevention string `json:"publicAccessPrevention,omitempty"` + // UniformBucketLevelAccess: The bucket's uniform bucket-level access + // configuration. + UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` + // ForceSendFields is a list of field names (e.g. "BucketPolicyOnly") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "BucketPolicyOnly") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIamConfiguration) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfiguration + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform bucket-level +// access configuration. The feature was formerly known as Bucket Policy Only. +// For backward compatibility, this field will be populated with identical +// information as the uniformBucketLevelAccess field. We recommend using the +// uniformBucketLevelAccess field to enable and disable the feature. +type BucketIamConfigurationBucketPolicyOnly struct { + // Enabled: If set, access is controlled only by bucket-level or above IAM + // policies. + Enabled bool `json:"enabled,omitempty"` + // LockedTime: The deadline for changing + // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 + // format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true + // to false until the locked time, after which the field is immutable. + LockedTime string `json:"lockedTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfigurationBucketPolicyOnly + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform +// bucket-level access configuration. +type BucketIamConfigurationUniformBucketLevelAccess struct { + // Enabled: If set, access is controlled only by bucket-level or above IAM + // policies. + Enabled bool `json:"enabled,omitempty"` + // LockedTime: The deadline for changing + // iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC + // 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be + // changed from true to false until the locked time, after which the field is + // immutable. + LockedTime string `json:"lockedTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfigurationUniformBucketLevelAccess + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilter: The bucket's IP filter configuration. Specifies the network +// sources that are allowed to access the operations on the bucket, as well as +// its underlying objects. Only enforced when the mode is set to 'Enabled'. +type BucketIpFilter struct { + // Mode: The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. + Mode string `json:"mode,omitempty"` + // PublicNetworkSource: The public network source of the bucket's IP filter. + PublicNetworkSource *BucketIpFilterPublicNetworkSource `json:"publicNetworkSource,omitempty"` + // VpcNetworkSources: The list of VPC network + // (https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter. + VpcNetworkSources []*BucketIpFilterVpcNetworkSources `json:"vpcNetworkSources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilter) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilter + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilterPublicNetworkSource: The public network source of the bucket's +// IP filter. 
+type BucketIpFilterPublicNetworkSource struct { + // AllowedIpCidrRanges: The list of public IPv4, IPv6 cidr ranges that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterPublicNetworkSource) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterPublicNetworkSource + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketIpFilterVpcNetworkSources struct { + // AllowedIpCidrRanges: The list of IPv4, IPv6 cidr ranges subnetworks that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // Network: Name of the network. Format: + // projects/{PROJECT_ID}/global/networks/{NETWORK_NAME} + Network string `json:"network,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterVpcNetworkSources) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterVpcNetworkSources + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See Lifecycle +// Management (https://cloud.google.com/storage/docs/lifecycle) for more +// information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take and + // the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + // ForceSendFields is a list of field names (e.g. "Rule") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Rule") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketLifecycle) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycle + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + // ForceSendFields is a list of field names (e.g. "Action") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRule + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // StorageClass: Target storage class. Required iff the type of the action is + // SetStorageClass. + StorageClass string `json:"storageClass,omitempty"` + // Type: Type of the action. Currently, only Delete, SetStorageClass, and + // AbortIncompleteMultipartUpload are supported. + Type string `json:"type,omitempty"` + // ForceSendFields is a list of field names (e.g. "StorageClass") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "StorageClass") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleAction + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action will +// be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an object + // reaches the specified age. + Age *int64 `json:"age,omitempty"` + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object is + // created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + // CustomTimeBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when the custom time on + // an object is before this date in UTC. + CustomTimeBefore string `json:"customTimeBefore,omitempty"` + // DaysSinceCustomTime: Number of days elapsed since the user-specified + // timestamp set on an object. The condition is satisfied if the days elapsed + // is at least this number. 
If no custom timestamp is specified on an object, + // the condition does not apply. + DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` + // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent + // timestamp of an object. The condition is satisfied if the days elapsed is at + // least this number. This condition is relevant only for versioned objects. + // The value of the field must be a nonnegative integer. If it's zero, the + // object version will become eligible for Lifecycle action as soon as it + // becomes noncurrent. + DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` + // IsLive: Relevant only for versioned objects. If the value is true, this + // condition matches live objects; if the value is false, it matches archived + // objects. + IsLive *bool `json:"isLive,omitempty"` + // MatchesPattern: A regular expression that satisfies the RE2 syntax. This + // condition is satisfied when the name of the object matches the RE2 pattern. + // Note: This feature is currently in the "Early Access" launch stage and is + // only available to a whitelisted set of users; that means that this feature + // may be changed in backward-incompatible ways and that it is not guaranteed + // to be released. + MatchesPattern string `json:"matchesPattern,omitempty"` + // MatchesPrefix: List of object name prefixes. This condition will be + // satisfied when at least one of the prefixes exactly matches the beginning of + // the object name. + MatchesPrefix []string `json:"matchesPrefix,omitempty"` + // MatchesStorageClass: Objects having any of the storage classes specified by + // this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, + // NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY. + MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + // MatchesSuffix: List of object name suffixes. This condition will be + // satisfied when at least one of the suffixes exactly matches the end of the + // object name. + MatchesSuffix []string `json:"matchesSuffix,omitempty"` + // NoncurrentTimeBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when the noncurrent + // time on an object is before this date in UTC. This condition is relevant + // only for versioned objects. + NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` + // NumNewerVersions: Relevant only for versioned objects. If the value is N, + // this condition is satisfied when there are at least N versions (including + // the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + // ForceSendFields is a list of field names (e.g. "Age") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Age") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleCondition + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs should be + // placed. + LogBucket string `json:"logBucket,omitempty"` + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "LogBucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketLogging) MarshalJSON() ([]byte, error) { + type NoMethod BucketLogging + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketObjectRetention: The bucket's object retention config. +type BucketObjectRetention struct { + // Mode: The bucket's object retention mode. Can be Enabled. + Mode string `json:"mode,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod BucketObjectRetention + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketOwner: The owner of the bucket. This is always the project team's +// owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + // ForceSendFields is a list of field names (e.g. "Entity") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketOwner) MarshalJSON() ([]byte, error) { + type NoMethod BucketOwner + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketRetentionPolicy: The bucket's retention policy. The retention policy +// enforces a minimum retention time for all objects contained in the bucket, +// based on their creation time. Any attempt to overwrite or delete objects +// younger than the retention period will result in a PERMISSION_DENIED error. +// An unlocked retention policy can be modified or removed from the bucket via +// a storage.buckets.update operation. A locked retention policy cannot be +// removed or shortened in duration for the lifetime of the bucket. Attempting +// to remove or decrease period of a locked retention policy will result in a +// PERMISSION_DENIED error. +type BucketRetentionPolicy struct { + // EffectiveTime: Server-determined value that indicates the time from which + // policy was enforced and effective. This value is in RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + // IsLocked: Once locked, an object retention policy cannot be modified. + IsLocked bool `json:"isLocked,omitempty"` + // RetentionPeriod: The duration in seconds that objects need to be retained. + // Retention duration must be greater than zero and less than 100 years. Note + // that enforcement of retention periods less than a day is not guaranteed. + // Such periods should only be used for testing purposes. + RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EffectiveTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketRetentionPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketRetentionPolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketSoftDeletePolicy: The bucket's soft delete policy, which defines the +// period of time that soft-deleted objects will be retained, and cannot be +// permanently deleted. +type BucketSoftDeletePolicy struct { + // EffectiveTime: Server-determined value that indicates the time from which + // the policy, or one with a greater retention, was effective. This value is in + // RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + // RetentionDurationSeconds: The duration in seconds that soft-deleted objects + // in the bucket will be retained and cannot be permanently deleted. + RetentionDurationSeconds int64 `json:"retentionDurationSeconds,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"EffectiveTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketSoftDeletePolicy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this bucket. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketVersioning) MarshalJSON() ([]byte, error) { + type NoMethod BucketVersioning + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketWebsite: The bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See the Static +// Website Examples (https://cloud.google.com/storage/docs/static-website) for +// more information. +type BucketWebsite struct { + // MainPageSuffix: If the requested object path is missing, the service will + // ensure the path has a trailing '/', append this suffix, and attempt to + // retrieve the resulting object. This allows the creation of index.html + // objects to represent directory pages. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will return the + // named object from this bucket as the content for a 404 Not Found result. + NotFoundPage string `json:"notFoundPage,omitempty"` + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "MainPageSuffix") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketWebsite) MarshalJSON() ([]byte, error) { + type NoMethod BucketWebsite + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + // Email: The email address associated with the entity, if any. 
+ Email string `json:"email,omitempty"` + // Entity: The entity holding the permission, in one of the following forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For bucket access control entries, this is + // always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControl + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the entity, +// if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + // Team: The team. + Team string `json:"team,omitempty"` + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ProjectNumber") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControlProjectTeam + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketAccessControls: An access-control list. 
+type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of bucket access control entries, + // this is always storage#bucketAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControls + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayout: The storage layout configuration of a bucket. +type BucketStorageLayout struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + // CustomPlacementConfig: The bucket's custom placement configuration for + // Custom Dual Regions. + CustomPlacementConfig *BucketStorageLayoutCustomPlacementConfig `json:"customPlacementConfig,omitempty"` + // HierarchicalNamespace: The bucket's hierarchical namespace configuration. + HierarchicalNamespace *BucketStorageLayoutHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` + // Kind: The kind of item this is. For storage layout, this is always + // storage#storageLayout. + Kind string `json:"kind,omitempty"` + // Location: The location of the bucket. + Location string `json:"location,omitempty"` + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayout) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayout + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutCustomPlacementConfig: The bucket's custom placement +// configuration for Custom Dual Regions. +type BucketStorageLayoutCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. 
By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketStorageLayoutHierarchicalNamespace: The bucket's hierarchical +// namespace configuration. +type BucketStorageLayoutHierarchicalNamespace struct { + // Enabled: When set to true, hierarchical namespace is enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enabled") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Enabled") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketStorageLayoutHierarchicalNamespace) MarshalJSON() ([]byte, error) { + type NoMethod BucketStorageLayoutHierarchicalNamespace + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. + Items []*Bucket `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of buckets, this is always + // storage#buckets. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Buckets) MarshalJSON() ([]byte, error) { + type NoMethod Buckets + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BulkRestoreObjectsRequest: A bulk restore objects request. +type BulkRestoreObjectsRequest struct { + // AllowOverwrite: If false (default), the restore will not overwrite live + // objects with the same name at the destination. 
This means some deleted + // objects may be skipped. If true, live objects will be overwritten resulting + // in a noncurrent object (if versioning is enabled). If versioning is not + // enabled, overwriting the object will result in a soft-deleted object. In + // either case, if a noncurrent object already exists with the same name, a + // live version can be written without issue. + AllowOverwrite bool `json:"allowOverwrite,omitempty"` + // CopySourceAcl: If true, copies the source object's ACL; otherwise, uses the + // bucket's default object ACL. The default is false. + CopySourceAcl bool `json:"copySourceAcl,omitempty"` + // MatchGlobs: Restores only the objects matching any of the specified glob(s). + // If this parameter is not specified, all objects will be restored within the + // specified time range. + MatchGlobs []string `json:"matchGlobs,omitempty"` + // SoftDeletedAfterTime: Restores only the objects that were soft-deleted after + // this time. + SoftDeletedAfterTime string `json:"softDeletedAfterTime,omitempty"` + // SoftDeletedBeforeTime: Restores only the objects that were soft-deleted + // before this time. + SoftDeletedBeforeTime string `json:"softDeletedBeforeTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowOverwrite") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowOverwrite") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { + type NoMethod BulkRestoreObjectsRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Channel: An notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this channel. + Address string `json:"address,omitempty"` + // Expiration: Date and time of notification channel expiration, expressed as a + // Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + // Kind: Identifies this as a notification channel used to watch for changes to + // a resource, which is "api#channel". + Kind string `json:"kind,omitempty"` + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + // Payload: A Boolean value to indicate whether payload is wanted. Optional. + Payload bool `json:"payload,omitempty"` + // ResourceId: An opaque ID that identifies the resource being watched on this + // channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + // Type: The type of delivery mechanism used for this channel. 
+ Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Address") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Address") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Channel) MarshalJSON() ([]byte, error) { + type NoMethod Channel + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ComposeRequest: A Compose request. +type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + // SourceObjects: The list of source objects that will be concatenated into a + // single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Destination") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ComposeRequest) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + // Name: The source object's name. All source objects must reside in the same + // bucket. + Name string `json:"name,omitempty"` + // ObjectPreconditions: Conditions that must be met for this operation to + // execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Generation") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjects + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must be met +// for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of the + // source object that would be used matches this value. If this value and a + // generation are both specified, they must be the same value or the call will + // fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "IfGenerationMatch") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjectsObjectPreconditions + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Expr: Represents an expression text. Example: title: "User account presence" +// description: "Determines whether the request has a user account" expression: +// "size(request.user) > 0" +type Expr struct { + // Description: An optional description of the expression. This is a longer + // text which describes the expression, e.g. when hovered over it in a UI. + Description string `json:"description,omitempty"` + // Expression: Textual representation of an expression in Common Expression + // Language syntax. The application context of the containing message + // determines which well-known feature set of CEL is supported. + Expression string `json:"expression,omitempty"` + // Location: An optional string indicating the location of the expression for + // error reporting, e.g. a file name and a position in the file. + Location string `json:"location,omitempty"` + // Title: An optional title for the expression, i.e. a short string describing + // its purpose. This can be used e.g. in UIs which allow to enter the + // expression. + Title string `json:"title,omitempty"` + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Description") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Expr) MarshalJSON() ([]byte, error) { + type NoMethod Expr + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Folder: A folder. 
Only available in buckets with hierarchical namespace +// enabled. +type Folder struct { + // Bucket: The name of the bucket containing this folder. + Bucket string `json:"bucket,omitempty"` + // CreateTime: The creation time of the folder in RFC 3339 format. + CreateTime string `json:"createTime,omitempty"` + // Id: The ID of the folder, including the bucket name, folder name. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For folders, this is always storage#folder. + Kind string `json:"kind,omitempty"` + // Metageneration: The version of the metadata for this folder. Used for + // preconditions and for detecting changes in metadata. + Metageneration int64 `json:"metageneration,omitempty,string"` + // Name: The name of the folder. Required if not specified by URL parameter. + Name string `json:"name,omitempty"` + // PendingRenameInfo: Only present if the folder is part of an ongoing rename + // folder operation. Contains information which can be used to query the + // operation status. + PendingRenameInfo *FolderPendingRenameInfo `json:"pendingRenameInfo,omitempty"` + // SelfLink: The link to this folder. + SelfLink string `json:"selfLink,omitempty"` + // UpdateTime: The modification time of the folder metadata in RFC 3339 format. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Folder) MarshalJSON() ([]byte, error) { + type NoMethod Folder + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// FolderPendingRenameInfo: Only present if the folder is part of an ongoing +// rename folder operation. Contains information which can be used to query the +// operation status. +type FolderPendingRenameInfo struct { + // OperationId: The ID of the rename folder operation. + OperationId string `json:"operationId,omitempty"` + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "OperationId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s FolderPendingRenameInfo) MarshalJSON() ([]byte, error) { + type NoMethod FolderPendingRenameInfo + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Folders: A list of folders. +type Folders struct { + // Items: The list of items. 
+ Items []*Folder `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of folders, this is always + // storage#folders. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Folders) MarshalJSON() ([]byte, error) { + type NoMethod Folders + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningListOperationsResponse: The response message for +// storage.buckets.operations.list. +type GoogleLongrunningListOperationsResponse struct { + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + // Operations: A list of operations that matches the specified filter in the + // request. + Operations []*GoogleLongrunningOperation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningListOperationsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningOperation: This resource represents a long-running +// operation that is the result of a network API call. +type GoogleLongrunningOperation struct { + // Done: If the value is "false", it means the operation is still in progress. + // If "true", the operation is completed, and either "error" or "response" is + // available. + Done bool `json:"done,omitempty"` + // Error: The error result of the operation in case of failure or cancellation. + Error *GoogleRpcStatus `json:"error,omitempty"` + // Kind: The kind of item this is. 
For operations, this is always + // storage#operation. + Kind string `json:"kind,omitempty"` + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as create + // time. Some services might not provide such metadata. Any method that returns + // a long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + // Name: The server-assigned name, which is only unique within the same service + // that originally returns it. If you use the default HTTP mapping, the "name" + // should be a resource name ending with "operations/{operationId}". + Name string `json:"name,omitempty"` + // Response: The normal response of the operation in case of success. If the + // original method returns no data on success, such as "Delete", the response + // is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other methods, + // the response should have the type "XxxResponse", where "Xxx" is the original + // method name. For example, if the original method name is "TakeSnapshot()", + // the inferred response type is "TakeSnapshotResponse". + Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningOperation + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// GoogleRpcStatus: The "Status" type defines a logical error model that is +// suitable for different programming environments, including REST APIs and RPC +// APIs. It is used by gRPC (https://github.com/grpc). Each "Status" message +// contains three pieces of data: error code, error message, and error details. +// You can find out more about this error model and how to work with it in the +// API Design Guide (https://cloud.google.com/apis/design/errors). +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of google.rpc.Code. + Code int64 `json:"code,omitempty"` + // Details: A list of messages that carry the error details. There is a common + // set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + // Message: A developer-facing error message, which should be in English. + Message string `json:"message,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Code") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// HmacKey: JSON template to produce a JSON-style HMAC Key resource for Create +// responses. +type HmacKey struct { + // Kind: The kind of item this is. For HMAC keys, this is always + // storage#hmacKey. + Kind string `json:"kind,omitempty"` + // Metadata: Key metadata. + Metadata *HmacKeyMetadata `json:"metadata,omitempty"` + // Secret: HMAC secret key material. + Secret string `json:"secret,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s HmacKey) MarshalJSON() ([]byte, error) { + type NoMethod HmacKey + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key metadata +// resource. +type HmacKeyMetadata struct { + // AccessId: The ID of the HMAC Key. + AccessId string `json:"accessId,omitempty"` + // Etag: HTTP 1.1 Entity tag for the HMAC key. + Etag string `json:"etag,omitempty"` + // Id: The ID of the HMAC key, including the Project ID and the Access ID. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For HMAC Key metadata, this is always + // storage#hmacKeyMetadata. + Kind string `json:"kind,omitempty"` + // ProjectId: Project ID owning the service account to which the key + // authenticates. + ProjectId string `json:"projectId,omitempty"` + // SelfLink: The link to this resource. + SelfLink string `json:"selfLink,omitempty"` + // ServiceAccountEmail: The email address of the key's associated service + // account. + ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` + // State: The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED. + State string `json:"state,omitempty"` + // TimeCreated: The creation time of the HMAC key in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + // Updated: The last modification time of the HMAC key metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "AccessId") to + // unconditionally include in API requests. 
By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AccessId") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s HmacKeyMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeyMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// HmacKeysMetadata: A list of hmacKeys. +type HmacKeysMetadata struct { + // Items: The list of items. + Items []*HmacKeyMetadata `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of hmacKeys, this is always + // storage#hmacKeysMetadata. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s HmacKeysMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeysMetadata + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ManagedFolder: A managed folder. +type ManagedFolder struct { + // Bucket: The name of the bucket containing this managed folder. + Bucket string `json:"bucket,omitempty"` + // CreateTime: The creation time of the managed folder in RFC 3339 format. + CreateTime string `json:"createTime,omitempty"` + // Id: The ID of the managed folder, including the bucket name and managed + // folder name. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For managed folders, this is always + // storage#managedFolder. + Kind string `json:"kind,omitempty"` + // Metageneration: The version of the metadata for this managed folder. Used + // for preconditions and for detecting changes in metadata. + Metageneration int64 `json:"metageneration,omitempty,string"` + // Name: The name of the managed folder. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + // SelfLink: The link to this managed folder. + SelfLink string `json:"selfLink,omitempty"` + // UpdateTime: The last update time of the managed folder metadata in RFC 3339 + // format. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. 
By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ManagedFolder) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolder + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ManagedFolders: A list of managed folders. +type ManagedFolders struct { + // Items: The list of items. + Items []*ManagedFolder `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of managed folders, this is always + // storage#managedFolders. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ManagedFolders) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolders + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach to + // each Cloud PubSub message published for this notification subscription. + CustomAttributes map[string]string `json:"custom_attributes,omitempty"` + // Etag: HTTP 1.1 Entity tag for this subscription notification. + Etag string `json:"etag,omitempty"` + // EventTypes: If present, only send notifications about listed event types. If + // empty, sent notifications for all event types. + EventTypes []string `json:"event_types,omitempty"` + // Id: The ID of the notification. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For notifications, this is always + // storage#notification. + Kind string `json:"kind,omitempty"` + // ObjectNamePrefix: If present, only apply this notification configuration to + // object names that begin with this prefix. + ObjectNamePrefix string `json:"object_name_prefix,omitempty"` + // PayloadFormat: The desired content of the Payload. + PayloadFormat string `json:"payload_format,omitempty"` + // SelfLink: The canonical URL of this notification. + SelfLink string `json:"selfLink,omitempty"` + // Topic: The Cloud PubSub topic to which this subscription publishes. 
+ // Formatted as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "CustomAttributes") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "CustomAttributes") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Notification) MarshalJSON() ([]byte, error) { + type NoMethod Notification + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Notifications: A list of notification subscriptions. +type Notifications struct { + // Items: The list of items. + Items []*Notification `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of notifications, this is always + // storage#notifications. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Notifications) MarshalJSON() ([]byte, error) { + type NoMethod Notifications + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. + Acl []*ObjectAccessControl `json:"acl,omitempty"` + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + // CacheControl: Cache-Control directive for the object data. If omitted, and + // the object is accessible to all anonymous users, the default will be public, + // max-age=3600. + CacheControl string `json:"cacheControl,omitempty"` + // ComponentCount: Number of underlying components that make up this object. + // Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + // ContentType: Content-Type of the object data. If an object is stored without + // a Content-Type, it is served as application/octet-stream. 
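The ForceSendFields/NullFields convention documented throughout these generated structs is easiest to see from the caller's side. A minimal, hypothetical sketch follows; the svc, ctx, bucket, and object names are assumptions, and the snippet is illustrative rather than part of the vendored file:

```
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// patchObjectMetadata shows the ForceSendFields/NullFields mechanism: listed
// fields are sent even when empty, or sent as explicit JSON null.
func patchObjectMetadata(ctx context.Context, svc *storage.Service, bucket, object string) (*storage.Object, error) {
	patch := &storage.Object{
		ContentType: "text/plain",
		// An empty CacheControl would normally be dropped from the request
		// body; listing it here forces it to be sent as "".
		ForceSendFields: []string{"CacheControl"},
		// Listing ContentLanguage here sends an explicit JSON null, which
		// clears the value on the server.
		NullFields: []string{"ContentLanguage"},
	}
	return svc.Objects.Patch(bucket, object, patch).Context(ctx).Do()
}
```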
+ ContentType string `json:"contentType,omitempty"` + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using + // base64 in big-endian byte order. For more information about using the CRC32c + // checksum, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). + Crc32c string `json:"crc32c,omitempty"` + // CustomTime: A timestamp in RFC 3339 format specified by the user for an + // object. + CustomTime string `json:"customTime,omitempty"` + // CustomerEncryption: Metadata of customer-supplied encryption key, if the + // object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + // EventBasedHold: Whether an object is under event-based hold. Event-based + // hold is a way to retain objects until an event occurs, which is signified by + // the hold's release (i.e. this value is set to false). After being released + // (set to false), such objects will be subject to bucket-level retention (if + // any). One sample use case of this flag is for banks to hold loan documents + // for at least 3 years after loan is paid in full. Here, bucket-level + // retention is 3 years and the event is the loan being paid in full. In this + // example, these objects will be held intact for any number of years until the + // event has occurred (event-based hold on the object is released) and then 3 + // more years after that. That means retention duration of the objects begins + // from the moment event-based hold transitioned from true to false. + EventBasedHold bool `json:"eventBasedHold,omitempty"` + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: This is the time (in the future) when the soft-deleted + // object will no longer be restorable. It is equal to the soft delete time + // plus the current soft delete retention duration of the bucket. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` + // Id: The ID of the object, including the bucket name, object name, and + // generation number. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For objects, this is always storage#object. + Kind string `json:"kind,omitempty"` + // KmsKeyName: Not currently supported. Specifying the parameter causes the + // request to fail with status code 400 - Bad Request. + KmsKeyName string `json:"kmsKeyName,omitempty"` + // Md5Hash: MD5 hash of the data; encoded using base64. For more information + // about using the MD5 hash, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). + Md5Hash string `json:"md5Hash,omitempty"` + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in metadata. A + // metageneration number is only meaningful in the context of a particular + // generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + // Name: The name of the object. Required if not specified by URL parameter. + Name string `json:"name,omitempty"` + // Owner: The owner of the object. 
This will always be the uploader of the + // object. + Owner *ObjectOwner `json:"owner,omitempty"` + // RestoreToken: Restore token used to differentiate deleted objects with the + // same name and generation. This field is only returned for deleted objects in + // hierarchical namespace buckets. + RestoreToken string `json:"restoreToken,omitempty"` + // Retention: A collection of object level retention parameters. + Retention *ObjectRetention `json:"retention,omitempty"` + // RetentionExpirationTime: A server-determined value that specifies the + // earliest time that the object's retention period expires. This value is in + // RFC 3339 format. Note 1: This field is not provided for objects with an + // active event-based hold, since retention expiration is unknown until the + // hold is removed. Note 2: This value can be provided even when temporary hold + // is set (so that the user can reason about policy without having to first + // unset the temporary hold). + RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"` + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + // SoftDeleteTime: The time at which the object became soft-deleted in RFC 3339 + // format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + // TemporaryHold: Whether an object is under temporary hold. While this flag is + // set to true, the object is protected against deletion and overwrites. A + // common use case of this flag is regulatory investigations where objects need + // to be retained while the investigation is ongoing. Note that unlike + // event-based hold, temporary hold does not impact retention expiration time + // of an object. + TemporaryHold bool `json:"temporaryHold,omitempty"` + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + // TimeDeleted: The time at which the object became noncurrent in RFC 3339 + // format. Will be returned if and only if this version of the object has been + // deleted. + TimeDeleted string `json:"timeDeleted,omitempty"` + // TimeStorageClassUpdated: The time at which the object's storage class was + // last changed. When the object is initially created, it will be set to + // timeCreated. + TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` + // Updated: The modification time of the object metadata in RFC 3339 format. + // Set initially to object creation time and then updated whenever any metadata + // of the object changes. This includes changes made by a requester, such as + // modifying custom metadata, as well as changes made by Cloud Storage on + // behalf of a requester, such as changing the storage class based on an Object + // Lifecycle Configuration. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Acl") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Acl") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Object) MarshalJSON() ([]byte, error) { + type NoMethod Object + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption key, if +// the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type NoMethod ObjectCustomerEncryption + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectOwner: The owner of the object. This will always be the uploader of +// the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + // ForceSendFields is a list of field names (e.g. "Entity") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectOwner) MarshalJSON() ([]byte, error) { + type NoMethod ObjectOwner + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectRetention: A collection of object level retention parameters. +type ObjectRetention struct { + // Mode: The bucket's object retention mode, can only be Unlocked or Locked. + Mode string `json:"mode,omitempty"` + // RetainUntilTime: A time in RFC 3339 format until which object retention + // protects this object. + RetainUntilTime string `json:"retainUntilTime,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod ObjectRetention + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + // Entity: The entity holding the permission, in one of the following forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + // Generation: The content generation of the object, if applied to an object. + Generation int64 `json:"generation,omitempty,string"` + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + // Kind: The kind of item this is. For object access control entries, this is + // always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + // Object: The name of the object, if applied to an object. + Object string `json:"object,omitempty"` + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bucket") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControl + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the entity, +// if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. 
+ ProjectNumber string `json:"projectNumber,omitempty"` + // Team: The team. + Team string `json:"team,omitempty"` + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ProjectNumber") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControlProjectTeam + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. + Items []*ObjectAccessControl `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of object access control entries, + // this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ObjectAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControls + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + // NextPageToken: The continuation token, used to page through large result + // sets. Provide this value in a subsequent request to return the next page of + // results. + NextPageToken string `json:"nextPageToken,omitempty"` + // Prefixes: The list of prefixes of objects matching-but-not-listed up to and + // including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Items") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. 
See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Objects) MarshalJSON() ([]byte, error) { + type NoMethod Objects + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// Policy: A bucket/object/managedFolder IAM policy. +type Policy struct { + // Bindings: An association between a role, which comes with a set of + // permissions, and members who may assume that role. + Bindings []*PolicyBindings `json:"bindings,omitempty"` + // Etag: HTTP 1.1 Entity tag for the policy. + Etag string `json:"etag,omitempty"` + // Kind: The kind of item this is. For policies, this is always storage#policy. + // This field is ignored on input. + Kind string `json:"kind,omitempty"` + // ResourceId: The ID of the resource to which this policy belongs. Will be of + // the form projects/_/buckets/bucket for buckets, + // projects/_/buckets/bucket/objects/object for objects, and + // projects/_/buckets/bucket/managedFolders/managedFolder. A specific + // generation may be specified by appending #generationNumber to the end of the + // object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The + // current generation can be denoted with #0. This field is ignored on input. + ResourceId string `json:"resourceId,omitempty"` + // Version: The IAM policy format version. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Bindings") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type PolicyBindings struct { + // Condition: The condition that is associated with this binding. NOTE: an + // unsatisfied condition will not allow user access via current binding. + // Different bindings, including their conditions, are examined independently. + Condition *Expr `json:"condition,omitempty"` + // Members: A collection of identifiers for members who may assume the provided + // role. Recognized identifiers are as follows: + // - allUsers — A special identifier that represents anyone on the internet; + // with or without a Google account. + // - allAuthenticatedUsers — A special identifier that represents anyone who + // is authenticated with a Google account or a service account. + // - user:emailid — An email address that represents a specific account. For + // example, user:alice@gmail.com or user:joe@example.com. + // - serviceAccount:emailid — An email address that represents a service + // account. For example, + // serviceAccount:my-other-app@appspot.gserviceaccount.com . + // - group:emailid — An email address that represents a Google group. For + // example, group:admins@example.com. 
+ // - domain:domain — A Google Apps domain name that represents all the users + // of that domain. For example, domain:google.com or domain:example.com. + // - projectOwner:projectid — Owners of the given project. For example, + // projectOwner:my-example-project + // - projectEditor:projectid — Editors of the given project. For example, + // projectEditor:my-example-project + // - projectViewer:projectid — Viewers of the given project. For example, + // projectViewer:my-example-project + Members []string `json:"members,omitempty"` + // Role: The role to which members belong. Two types of roles are supported: + // new IAM roles, which grant permissions that do not map directly to those + // provided by ACLs, and legacy IAM roles, which do map directly to ACL + // permissions. All roles are of the format roles/storage.specificRole. + // The new IAM roles are: + // - roles/storage.admin — Full control of Google Cloud Storage resources. + // + // - roles/storage.objectViewer — Read-Only access to Google Cloud Storage + // objects. + // - roles/storage.objectCreator — Access to create objects in Google Cloud + // Storage. + // - roles/storage.objectAdmin — Full control of Google Cloud Storage + // objects. The legacy IAM roles are: + // - roles/storage.legacyObjectReader — Read-only access to objects without + // listing. Equivalent to an ACL entry on an object with the READER role. + // - roles/storage.legacyObjectOwner — Read/write access to existing objects + // without listing. Equivalent to an ACL entry on an object with the OWNER + // role. + // - roles/storage.legacyBucketReader — Read access to buckets with object + // listing. Equivalent to an ACL entry on a bucket with the READER role. + // - roles/storage.legacyBucketWriter — Read access to buckets with object + // listing/creation/deletion. Equivalent to an ACL entry on a bucket with the + // WRITER role. + // - roles/storage.legacyBucketOwner — Read and write access to existing + // buckets with object listing/creation/deletion. Equivalent to an ACL entry on + // a bucket with the OWNER role. + Role string `json:"role,omitempty"` + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Condition") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s PolicyBindings) MarshalJSON() ([]byte, error) { + type NoMethod PolicyBindings + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is in + // progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize int64 `json:"objectSize,omitempty,string"` + // Resource: A resource containing the metadata for the copied-to object. 
This + // property is present in the response only when copying completes. + Resource *Object `json:"resource,omitempty"` + // RewriteToken: A token to use in subsequent requests to continue copying + // data. This token is present in the response only when there is more data to + // copy. + RewriteToken string `json:"rewriteToken,omitempty"` + // TotalBytesRewritten: The total bytes written so far, which can be used to + // provide a waiting user with a progress indicator. This property is always + // present in the response. + TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Done") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Done") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RewriteResponse) MarshalJSON() ([]byte, error) { + type NoMethod RewriteResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// ServiceAccount: A subscription to receive Google PubSub notifications. +type ServiceAccount struct { + // EmailAddress: The ID of the notification. + EmailAddress string `json:"email_address,omitempty"` + // Kind: The kind of item this is. For notifications, this is always + // storage#notification. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "EmailAddress") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EmailAddress") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ServiceAccount) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAccount + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: A +// storage.(buckets|objects|managedFolders).testIamPermissions response. +type TestIamPermissionsResponse struct { + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + // Permissions: The permissions held by the caller. Permissions are always of + // the format storage.resource.capability, where resource is one of buckets, + // objects, or managedFolders. The supported permissions are as follows: + // - storage.buckets.delete — Delete bucket. + // - storage.buckets.get — Read bucket metadata. + // - storage.buckets.getIamPolicy — Read bucket IAM policy. + // - storage.buckets.create — Create bucket. + // - storage.buckets.list — List buckets. 
+ // - storage.buckets.setIamPolicy — Update bucket IAM policy. + // - storage.buckets.update — Update bucket metadata. + // - storage.objects.delete — Delete object. + // - storage.objects.get — Read object data and metadata. + // - storage.objects.getIamPolicy — Read object IAM policy. + // - storage.objects.create — Create object. + // - storage.objects.list — List objects. + // - storage.objects.setIamPolicy — Update object IAM policy. + // - storage.objects.update — Update object metadata. + // - storage.managedFolders.delete — Delete managed folder. + // - storage.managedFolders.get — Read managed folder metadata. + // - storage.managedFolders.getIamPolicy — Read managed folder IAM policy. + // + // - storage.managedFolders.create — Create managed folder. + // - storage.managedFolders.list — List managed folders. + // - storage.managedFolders.setIamPolicy — Update managed folder IAM policy. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsResponse + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type AnywhereCachesDisableCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Disable: Disables an Anywhere Cache instance. +// +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Disable(bucket string, anywhereCacheId string) *AnywhereCachesDisableCall { + c := &AnywhereCachesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywhereCacheId = anywhereCacheId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesDisableCall) Fields(s ...googleapi.Field) *AnywhereCachesDisableCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesDisableCall) Context(ctx context.Context) *AnywhereCachesDisableCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
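The permission names enumerated above are what callers pass into the generated testIamPermissions calls and then read back from TestIamPermissionsResponse.Permissions. A minimal sketch, assuming a *storage.Service named svc and the BucketsService.TestIamPermissions(bucket, permissions) constructor generated elsewhere in this file:

```
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// canCreateObjects reports whether the caller holds storage.objects.create on
// the bucket, using the permission names listed above.
func canCreateObjects(ctx context.Context, svc *storage.Service, bucket string) (bool, error) {
	resp, err := svc.Buckets.TestIamPermissions(bucket, []string{"storage.objects.create"}).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions {
		if p == "storage.objects.create" {
			return true, nil
		}
	}
	return false, nil
}
```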
+func (c *AnywhereCachesDisableCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.disable" call. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type AnywhereCachesGetCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the metadata of an Anywhere Cache instance. +// +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Get(bucket string, anywhereCacheId string) *AnywhereCachesGetCall { + c := &AnywhereCachesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywhereCacheId = anywhereCacheId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesGetCall) Fields(s ...googleapi.Field) *AnywhereCachesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *AnywhereCachesGetCall) IfNoneMatch(entityTag string) *AnywhereCachesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *AnywhereCachesGetCall) Context(ctx context.Context) *AnywhereCachesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AnywhereCachesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type AnywhereCachesInsertCall struct { + s *Service + bucket string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an Anywhere Cache instance. +// +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCachesInsertCall { + c := &AnywhereCachesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywherecache = anywherecache + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesInsertCall) Fields(s ...googleapi.Field) *AnywhereCachesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesInsertCall) Context(ctx context.Context) *AnywhereCachesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
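The Do comments above point to googleapi.IsNotModified, and together with IfNoneMatch this supports etag-conditional reads. A hypothetical sketch, using Buckets.Get purely for illustration; svc and the cached value are assumed:

```
package example

import (
	"context"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// refreshBucket re-fetches bucket metadata, returning the cached copy when
// the server answers 304 Not Modified for the cached etag.
func refreshBucket(ctx context.Context, svc *storage.Service, name string, cached *storage.Bucket) (*storage.Bucket, error) {
	call := svc.Buckets.Get(name).Context(ctx)
	if cached != nil {
		call = call.IfNoneMatch(cached.Etag)
	}
	b, err := call.Do()
	if googleapi.IsNotModified(err) {
		return cached, nil
	}
	if err != nil {
		return nil, err
	}
	return b, nil
}
```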
+func (c *AnywhereCachesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type AnywhereCachesListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns a list of Anywhere Cache instances of the bucket matching the +// criteria. +// +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) List(bucket string) *AnywhereCachesListCall { + c := &AnywhereCachesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. Maximum 1000. +func (c *AnywhereCachesListCall) PageSize(pageSize int64) *AnywhereCachesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *AnywhereCachesListCall) PageToken(pageToken string) *AnywhereCachesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
+func (c *AnywhereCachesListCall) Fields(s ...googleapi.Field) *AnywhereCachesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *AnywhereCachesListCall) IfNoneMatch(entityTag string) *AnywhereCachesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesListCall) Context(ctx context.Context) *AnywhereCachesListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AnywhereCachesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCaches.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AnywhereCaches{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
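The Pages helper defined just below handles the pageToken bookkeeping, so a caller only supplies a per-page callback. A minimal sketch, assuming the Service.AnywhereCaches field wired up by NewService elsewhere in this file and the usual Items slice on the list response:

```
package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// countAnywhereCaches walks every page of Anywhere Cache instances in the
// bucket; returning a non-nil error from the callback stops iteration early.
func countAnywhereCaches(ctx context.Context, svc *storage.Service, bucket string) (int, error) {
	n := 0
	err := svc.AnywhereCaches.List(bucket).PageSize(100).Pages(ctx, func(page *storage.AnywhereCaches) error {
		n += len(page.Items)
		return nil
	})
	return n, err
}
```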
+func (c *AnywhereCachesListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type AnywhereCachesPauseCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Pause: Pauses an Anywhere Cache instance. +// +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Pause(bucket string, anywhereCacheId string) *AnywhereCachesPauseCall { + c := &AnywhereCachesPauseCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywhereCacheId = anywhereCacheId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesPauseCall) Fields(s ...googleapi.Field) *AnywhereCachesPauseCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesPauseCall) Context(ctx context.Context) *AnywhereCachesPauseCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AnywhereCachesPauseCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.pause" call. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type AnywhereCachesResumeCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Resume: Resumes a paused or disabled Anywhere Cache instance. +// +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Resume(bucket string, anywhereCacheId string) *AnywhereCachesResumeCall { + c := &AnywhereCachesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywhereCacheId = anywhereCacheId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesResumeCall) Fields(s ...googleapi.Field) *AnywhereCachesResumeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesResumeCall) Context(ctx context.Context) *AnywhereCachesResumeCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AnywhereCachesResumeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.resume" call. +// Any non-2xx status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type AnywhereCachesUpdateCall struct { + s *Service + bucket string + anywhereCacheId string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the config(ttl and admissionPolicy) of an Anywhere Cache +// instance. +// +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the parent bucket. +func (r *AnywhereCachesService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCachesUpdateCall { + c := &AnywhereCachesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.anywhereCacheId = anywhereCacheId + c.anywherecache = anywherecache + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *AnywhereCachesUpdateCall) Fields(s ...googleapi.Field) *AnywhereCachesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *AnywhereCachesUpdateCall) Context(ctx context.Context) *AnywhereCachesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *AnywhereCachesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.anywhereCaches.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on the +// specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type BucketAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { + c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. 
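+//
+// Illustrative usage, a sketch assuming svc is a *Service from NewService and
+// previousETag was recorded from an earlier response: a conditional read can
+// treat "not modified" as a normal, non-error outcome via googleapi.IsNotModified.
+//
+//	acl, err := svc.BucketAccessControls.Get("my-bucket", "allUsers").
+//		IfNoneMatch(previousETag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// The copy identified by previousETag is still current; reuse it.
+//	} else if err != nil {
+//		log.Fatal(err)
+//	} else {
+//		log.Printf("refreshed ACL entry: %+v", acl)
+//	}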
+func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified bucket. +// +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { + c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
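+//
+// Illustrative usage, a sketch in which updatedACL is a caller-built
+// *BucketAccessControl and the header name is only an example: extra request
+// headers can be attached through Header before Do is called.
+//
+//	call := svc.BucketAccessControls.Patch("my-bucket", "allUsers", updatedACL)
+//	call.Header().Set("X-Example-Trace", "debug-1234")
+//	if _, err := call.Context(ctx).Do(); err != nil {
+//		log.Fatal(err)
+//	}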
+func (c *BucketAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. 
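+//
+// Illustrative usage, a sketch in which newACL is a caller-built
+// *BucketAccessControl: limiting the response to the entity and role fields
+// keeps the partial response small.
+//
+//	ret, err := svc.BucketAccessControls.Update("my-bucket", "allUsers", newACL).
+//		Fields("entity", "role").
+//		Context(ctx).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("entity %q now has role %q", ret.Entity, ret.Role)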
+func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an empty bucket. Deletions are permanent unless soft delete +// is enabled on the bucket. +// +// - bucket: Name of a bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the bucket if its metageneration matches this value. 
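+//
+// Illustrative usage, a sketch that assumes the generated Bucket struct's
+// int64 Metageneration field: pinning the delete to a previously observed
+// metageneration guards against racing a concurrent metadata change.
+//
+//	b, err := svc.Buckets.Get("my-bucket").Context(ctx).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := svc.Buckets.Delete("my-bucket").
+//		IfMetagenerationMatch(b.Metageneration).
+//		Context(ctx).
+//		Do(); err != nil {
+//		log.Fatal(err)
+//	}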
+func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata for the specified bucket. +// +// - bucket: Name of a bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Generation sets the optional parameter "generation": If present, specifies +// the generation of the bucket. This is required if softDeleted is true. +func (c *BucketsGetCall) Generation(generation int64) *BucketsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. 
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not match +// the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, return the +// soft-deleted version of this bucket. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsGetCall) SoftDeleted(softDeleted bool) *BucketsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.get" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsGetIamPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified bucket. +// +// - bucket: Name of a bucket. +func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { + c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older version that +// doesn't support part of the requested IAM policy, the request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *BucketsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.getIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsGetStorageLayoutCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetStorageLayout: Returns the storage layout configuration for the specified +// bucket. Note that this operation requires storage.objects.list permission. +// +// - bucket: Name of a bucket. +func (r *BucketsService) GetStorageLayout(bucket string) *BucketsGetStorageLayoutCall { + c := &BucketsGetStorageLayoutCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Prefix sets the optional parameter "prefix": An optional prefix used for +// permission check. It is useful when the caller only has storage.objects.list +// permission under a specific prefix. +func (c *BucketsGetStorageLayoutCall) Prefix(prefix string) *BucketsGetStorageLayoutCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsGetStorageLayoutCall) Fields(s ...googleapi.Field) *BucketsGetStorageLayoutCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
+func (c *BucketsGetStorageLayoutCall) IfNoneMatch(entityTag string) *BucketsGetStorageLayoutCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsGetStorageLayoutCall) Context(ctx context.Context) *BucketsGetStorageLayoutCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsGetStorageLayoutCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.getStorageLayout" call. +// Any non-2xx status code is an error. Response headers are in either +// *BucketStorageLayout.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketStorageLayout, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BucketStorageLayout{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new bucket. +// +// - project: A valid API project identifier. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// EnableObjectRetention sets the optional parameter "enableObjectRetention": +// When set to true, object retention is enabled for this bucket. +func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *BucketsInsertCall { + c.urlParams_.Set("enableObjectRetention", fmt.Sprint(enableObjectRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. 
+// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to their +// +// roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers get +// +// READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and allUsers get +// +// WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the bucket resource specifies acl or +// defaultObjectAcl properties, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *BucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of buckets for a given project. +// +// - project: A valid API project identifier. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number of +// buckets to return in a single response. The service will use this parameter +// or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to buckets whose +// names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
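+//
+// Illustrative usage, a sketch assuming the conventional Items field on the
+// Buckets list response: combining Projection, Prefix, and Pages keeps the
+// listing cheap while still walking every matching bucket.
+//
+//	err := svc.Buckets.List("my-project").
+//		Projection("noAcl").
+//		Prefix("logs-").
+//		Pages(ctx, func(page *Buckets) error {
+//			for _, b := range page.Items {
+//				log.Println(b.Name)
+//			}
+//			return nil
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}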
+func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted bucket versions will be returned. The default is false. For +// more information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsListCall) SoftDeleted(softDeleted bool) *BucketsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *Buckets.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Buckets{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type BucketsLockRetentionPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// LockRetentionPolicy: Locks retention policy on a bucket. +// +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether bucket's +// current metageneration matches the given value. +func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { + c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsLockRetentionPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.lockRetentionPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a bucket. Changes to the bucket will be readable immediately +// after writing, but configuration changes may take time to propagate. +// +// - bucket: Name of a bucket. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not match +// the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to their +// +// roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers get +// +// READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and allUsers get +// +// WRITER access. 
+func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
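A sketch of the conditional-update pattern the ifMetagenerationMatch parameter above enables; the nested storage.BucketVersioning type name is an assumption drawn from the generator's Parent+Field naming convention.

```
import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// enableVersioning patches only the versioning sub-resource and guards the
// write with a previously read metageneration, so a concurrent metadata
// change makes the call fail instead of being silently overwritten.
func enableVersioning(ctx context.Context, svc *storage.Service, current *storage.Bucket) (*storage.Bucket, error) {
	patch := &storage.Bucket{Versioning: &storage.BucketVersioning{Enabled: true}}
	return svc.Buckets.Patch(current.Name, patch).
		IfMetagenerationMatch(current.Metageneration).
		Context(ctx).
		Do()
}
```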
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsRestoreCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted bucket. +// +// - bucket: Name of a bucket. +// - generation: Generation of a bucket. +func (r *BucketsService) Restore(bucket string, generation int64) *BucketsRestoreCall { + c := &BucketsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsRestoreCall) UserProject(userProject string) *BucketsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRestoreCall) Fields(s ...googleapi.Field) *BucketsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRestoreCall) Context(ctx context.Context) *BucketsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.restore" call. +func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
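The Pages helper defined earlier in this file drives the pageToken/NextPageToken loop itself; a sketch, reusing the same assumed service handle:

```
import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// listAllBuckets walks every page of results; Pages re-issues the request
// with each NextPageToken until the token comes back empty.
func listAllBuckets(ctx context.Context, svc *storage.Service, project string) error {
	return svc.Buckets.List(project).Pages(ctx, func(page *storage.Buckets) error {
		for _, b := range page.Items {
			fmt.Println(b.Name, b.Location)
		}
		return nil // a non-nil error here halts the iteration
	})
}
```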
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified bucket. +// +// - bucket: Name of a bucket. +func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.policy = policy + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
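A sketch of the read-modify-write cycle SetIamPolicy expects; GetIamPolicy comes from the same generated service, and the storage.PolicyBindings element type is assumed from the generator's Parent+Field naming convention.

```
import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// grantObjectViewer appends a binding to the bucket's current policy and
// writes the whole policy back with SetIamPolicy.
func grantObjectViewer(ctx context.Context, svc *storage.Service, bucket, member string) error {
	policy, err := svc.Buckets.GetIamPolicy(bucket).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{member},
	})
	_, err = svc.Buckets.SetIamPolicy(bucket, policy).Context(ctx).Do()
	return err
}
```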
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsTestIamPermissionsCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given bucket to see +// which, if any, are held by the caller. +// +// - bucket: Name of a bucket. +// - permissions: Permissions to test. +func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { + c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a bucket. Changes to the bucket will be readable immediately +// after writing, but configuration changes may take time to propagate. +// +// - bucket: Name of a bucket. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the bucket metadata conditional on whether the bucket's +// current metageneration matches the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not match +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to their +// +// roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers get +// +// READER access. 
+// +// "publicReadWrite" - Project team owners get OWNER access, and allUsers get +// +// WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default object +// access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.update" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the default object ACL entry for the specified +// entity on the specified bucket. +// +// - bucket: Name of a bucket. 
+// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type DefaultObjectAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the default object ACL entry for the specified entity on the +// specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new default object ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves default object ACL entries on the specified bucket. +// +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If present, only return default ACL listing if the bucket's current +// metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL listing if +// the bucket's current metageneration does not match the given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a default object ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. 
+func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a default object ACL entry on the specified bucket. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
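A sketch of seeding a bucket's default object ACL with the Insert call defined above, assuming the same service handle; Entity and Role mirror the ObjectAccessControl resource fields.

```
import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// addDefaultReader makes every future object in the bucket readable by
// authenticated users by inserting one default object ACL entry.
func addDefaultReader(ctx context.Context, svc *storage.Service, bucket string) error {
	_, err := svc.DefaultObjectAccessControls.Insert(bucket, &storage.ObjectAccessControl{
		Entity: "allAuthenticatedUsers",
		Role:   "READER",
	}).Context(ctx).Do()
	return err
}
```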
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersDeleteCall struct { + s *Service + bucket string + folder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a folder. Only applicable to buckets with +// hierarchical namespace enabled. +// +// - bucket: Name of the bucket in which the folder resides. +// - folder: Name of a folder. +func (r *FoldersService) Delete(bucket string, folder string) *FoldersDeleteCall { + c := &FoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.folder = folder + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the folder if its metageneration matches this value. +func (c *FoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the folder if its +// metageneration does not match this value. +func (c *FoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersDeleteCall) Fields(s ...googleapi.Field) *FoldersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersDeleteCall) Context(ctx context.Context) *FoldersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "folder": c.folder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.folders.delete" call. +func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type FoldersGetCall struct { + s *Service + bucket string + folder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata for the specified folder. Only applicable to buckets +// with hierarchical namespace enabled. +// +// - bucket: Name of the bucket in which the folder resides. +// - folder: Name of a folder. +func (r *FoldersService) Get(bucket string, folder string) *FoldersGetCall { + c := &FoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.folder = folder + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the folder metadata conditional on whether the folder's +// current metageneration matches the given value. +func (c *FoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the folder metadata +// conditional on whether the folder's current metageneration does not match +// the given value. +func (c *FoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersGetCall) Fields(s ...googleapi.Field) *FoldersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *FoldersGetCall) IfNoneMatch(entityTag string) *FoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersGetCall) Context(ctx context.Context) *FoldersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "folder": c.folder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.folders.get" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersInsertCall struct { + s *Service + bucket string + folder *Folder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new folder. Only applicable to buckets with hierarchical +// namespace enabled. +// +// - bucket: Name of the bucket in which the folder resides. +func (r *FoldersService) Insert(bucket string, folder *Folder) *FoldersInsertCall { + c := &FoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.folder = folder + return c +} + +// Recursive sets the optional parameter "recursive": If true, any parent +// folder which doesn’t exist will be created automatically. +func (c *FoldersInsertCall) Recursive(recursive bool) *FoldersInsertCall { + c.urlParams_.Set("recursive", fmt.Sprint(recursive)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersInsertCall) Fields(s ...googleapi.Field) *FoldersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersInsertCall) Context(ctx context.Context) *FoldersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.folders.insert" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type FoldersListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of folders matching the criteria. Only applicable to +// buckets with hierarchical namespace enabled. +// +// - bucket: Name of the bucket in which to look for folders. +func (r *FoldersService) List(bucket string) *FoldersListCall { + c := &FoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. The only supported value is '/'. If set, items will +// only contain folders that either exactly match the prefix, or are one level +// below the prefix. +func (c *FoldersListCall) Delimiter(delimiter string) *FoldersListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to folders +// whose names are lexicographically before endOffset. If startOffset is also +// set, the folders listed will have names between startOffset (inclusive) and +// endOffset (exclusive). +func (c *FoldersListCall) EndOffset(endOffset string) *FoldersListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. +func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to folders whose +// paths begin with this prefix. If set, the value must either be an empty +// string or end with a '/'. +func (c *FoldersListCall) Prefix(prefix string) *FoldersListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results to +// folders whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the folders listed will have names between +// startOffset (inclusive) and endOffset (exclusive). 
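+//
+// For example (an illustrative sketch; svc stands for an authenticated
+// *Service and "my-bucket" for a bucket with hierarchical namespace enabled):
+//
+//	svc.Folders.List("my-bucket").StartOffset("m").EndOffset("n")
+//
+// lists folders named from "m" (inclusive) up to "n" (exclusive).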
+func (c *FoldersListCall) StartOffset(startOffset string) *FoldersListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersListCall) Fields(s ...googleapi.Field) *FoldersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *FoldersListCall) IfNoneMatch(entityTag string) *FoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersListCall) Context(ctx context.Context) *FoldersListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.folders.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *Folders.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Folders{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
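+//
+// A minimal usage sketch (illustrative; assumes an authenticated *Service
+// named svc, a context.Context named ctx, and a hierarchical-namespace bucket
+// named "my-bucket"):
+//
+//	err := svc.Folders.List("my-bucket").Prefix("logs/").Pages(ctx, func(p *Folders) error {
+//		for _, f := range p.Items {
+//			fmt.Println(f.Name)
+//		}
+//		return nil
+//	})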
+func (c *FoldersListCall) Pages(ctx context.Context, f func(*Folders) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type FoldersRenameCall struct { + s *Service + bucket string + sourceFolder string + destinationFolder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rename: Renames a source folder to a destination folder. Only applicable to +// buckets with hierarchical namespace enabled. +// +// - bucket: Name of the bucket in which the folders are in. +// - destinationFolder: Name of the destination folder. +// - sourceFolder: Name of the source folder. +func (r *FoldersService) Rename(bucket string, sourceFolder string, destinationFolder string) *FoldersRenameCall { + c := &FoldersRenameCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.sourceFolder = sourceFolder + c.destinationFolder = destinationFolder + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. +func (c *FoldersRenameCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *FoldersRenameCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. +func (c *FoldersRenameCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *FoldersRenameCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *FoldersRenameCall) Fields(s ...googleapi.Field) *FoldersRenameCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *FoldersRenameCall) Context(ctx context.Context) *FoldersRenameCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *FoldersRenameCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "sourceFolder": c.sourceFolder, + "destinationFolder": c.destinationFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.folders.rename" call. 
+// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ManagedFoldersDeleteCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *ManagedFoldersDeleteCall { + c := &ManagedFoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// AllowNonEmpty sets the optional parameter "allowNonEmpty": Allows the +// deletion of a managed folder even if it is not empty. A managed folder is +// empty if there are no objects or managed folders that it applies to. Callers +// must have storage.managedFolders.setIamPolicy permission. +func (c *ManagedFoldersDeleteCall) AllowNonEmpty(allowNonEmpty bool) *ManagedFoldersDeleteCall { + c.urlParams_.Set("allowNonEmpty", fmt.Sprint(allowNonEmpty)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// If set, only deletes the managed folder if its metageneration matches this +// value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the managed folder if its +// metageneration does not match this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersDeleteCall) Fields(s ...googleapi.Field) *ManagedFoldersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *ManagedFoldersDeleteCall) Context(ctx context.Context) *ManagedFoldersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.delete" call. +func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type ManagedFoldersGetCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata of the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Get(bucket string, managedFolder string) *ManagedFoldersGetCall { + c := &ManagedFoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the return of the managed folder metadata conditional on whether the +// managed folder's current metageneration matches the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the managed folder metadata +// conditional on whether the managed folder's current metageneration does not +// match the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersGetCall) Fields(s ...googleapi.Field) *ManagedFoldersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
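+//
+// A conditional-fetch sketch (illustrative; assumes svc, ctx, and an ETag
+// string named etag saved from an earlier response; the bucket and folder
+// names are placeholders):
+//
+//	mf, err := svc.ManagedFolders.Get("my-bucket", "teams/a/").IfNoneMatch(etag).Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// unchanged since the last request; keep using the cached copy
+//	}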
+func (c *ManagedFoldersGetCall) IfNoneMatch(entityTag string) *ManagedFoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ManagedFoldersGetCall) Context(ctx context.Context) *ManagedFoldersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ManagedFoldersGetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) GetIamPolicy(bucket string, managedFolder string) *ManagedFoldersGetIamPolicyCall { + c := &ManagedFoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older version that +// doesn't support part of the requested IAM policy, the request fails. 
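+//
+// For example (illustrative; policies that use IAM Conditions generally
+// require format version 3):
+//
+//	policy, err := svc.ManagedFolders.GetIamPolicy("my-bucket", "teams/a/").OptionsRequestedPolicyVersion(3).Do()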
+func (c *ManagedFoldersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ManagedFoldersGetIamPolicyCall) UserProject(userProject string) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ManagedFoldersGetIamPolicyCall) IfNoneMatch(entityTag string) *ManagedFoldersGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ManagedFoldersGetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.getIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ManagedFoldersInsertCall struct { + s *Service + bucket string + managedfolder *ManagedFolder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) Insert(bucket string, managedfolder *ManagedFolder) *ManagedFoldersInsertCall { + c := &ManagedFoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedfolder = managedfolder + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersInsertCall) Fields(s ...googleapi.Field) *ManagedFoldersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ManagedFoldersInsertCall) Context(ctx context.Context) *ManagedFoldersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ManagedFoldersListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists managed folders in the given bucket. +// +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall { + c := &ManagedFoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. +func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *ManagedFoldersListCall) PageToken(pageToken string) *ManagedFoldersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": The managed folder name/path +// prefix to filter the output list of results. +func (c *ManagedFoldersListCall) Prefix(prefix string) *ManagedFoldersListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersListCall) Fields(s ...googleapi.Field) *ManagedFoldersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ManagedFoldersListCall) IfNoneMatch(entityTag string) *ManagedFoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ManagedFoldersListCall) Context(ctx context.Context) *ManagedFoldersListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ManagedFolders.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolders, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolders{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolders) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ManagedFoldersSetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) SetIamPolicy(bucket string, managedFolder string, policy *Policy) *ManagedFoldersSetIamPolicyCall { + c := &ManagedFoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.policy = policy + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ManagedFoldersSetIamPolicyCall) UserProject(userProject string) *ManagedFoldersSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *ManagedFoldersSetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ManagedFoldersTestIamPermissionsCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given managed folder +// to see which, if any, are held by the caller. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +// - permissions: Permissions to test. +func (r *ManagedFoldersService) TestIamPermissions(bucket string, managedFolder string, permissions []string) *ManagedFoldersTestIamPermissionsCall { + c := &ManagedFoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. 
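+//
+// For example (illustrative; "my-billing-project" stands in for the project
+// to bill when the bucket has Requester Pays enabled):
+//
+//	resp, err := svc.ManagedFolders.TestIamPermissions("my-bucket", "teams/a/",
+//		[]string{"storage.objects.get"}).UserProject("my-billing-project").Do()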
+func (c *ManagedFoldersTestIamPermissionsCall) UserProject(userProject string) *ManagedFoldersTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ManagedFoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedFoldersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ManagedFoldersTestIamPermissionsCall) IfNoneMatch(entityTag string) *ManagedFoldersTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ManagedFoldersTestIamPermissionsCall) Context(ctx context.Context) *ManagedFoldersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type NotificationsDeleteCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a notification subscription. +// +// - bucket: The parent bucket of the notification. +// - notification: ID of the notification to delete. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *NotificationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. +// +// - bucket: The parent bucket of the notification. +// - notification: Notification ID. +func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *NotificationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a notification subscription for a given bucket. +// +// - bucket: The parent bucket of the notification. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *NotificationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type NotificationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of notification subscriptions for a given bucket. +// +// - bucket: Name of a Google Cloud Storage bucket. +func (r *NotificationsService) List(bucket string) *NotificationsListCall { + c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *NotificationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *Notifications.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Notifications{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on the +// specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
+func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
+func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
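+ // Illustrative caller-side sketch for creating an object ACL entry, given
+ // a previously constructed *storage.Service named svc (placeholder bucket,
+ // object, and entity values; assumes the package is imported as storage):
+ //
+ //	acl, err := svc.ObjectAccessControls.Insert("example-bucket", "example-object",
+ //		&storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"}).
+ //		Context(ctx).
+ //		Do()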
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). 
+func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
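+ // As with the other typed calls, a successful PATCH response is decoded
+ // into a freshly allocated ObjectAccessControl whose embedded
+ // ServerResponse records the HTTP status code and headers.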
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsBulkRestoreCall struct { + s *Service + bucket string + bulkrestoreobjectsrequest *BulkRestoreObjectsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BulkRestore: Initiates a long-running bulk restore operation on the +// specified bucket. +// +// - bucket: Name of the bucket in which the object resides. +func (r *ObjectsService) BulkRestore(bucket string, bulkrestoreobjectsrequest *BulkRestoreObjectsRequest) *ObjectsBulkRestoreCall { + c := &ObjectsBulkRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bulkrestoreobjectsrequest = bulkrestoreobjectsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsBulkRestoreCall) Fields(s ...googleapi.Field) *ObjectsBulkRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsBulkRestoreCall) Context(ctx context.Context) *ObjectsBulkRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectsBulkRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/bulkRestore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.bulkRestore" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in the +// same bucket. +// +// - destinationBucket: Name of the bucket containing the source objects. The +// destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. 
+// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of the +// Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.compose" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally overrides +// metadata. +// +// - destinationBucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any.For +// information about how to URL encode object names to be path safe, see +// Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's name +// value, if any. +// - sourceBucket: Name of the bucket in which to find the source object. +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter "destinationKmsKeyName": +// Resource name of the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. 
+func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the destination object's current generation +// matches the given value. Setting to 0 makes the operation succeed only if +// there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the destination object's current +// generation does not match the given value. If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. 
+func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If present, +// selects a specific revision of the source object (as opposed to the latest +// version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.copy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent if +// versioning is not enabled for the bucket, or if the generation parameter is +// used. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, permanently +// deletes a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. 
Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
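+ // Delete returns no resource body, so the flow below only validates the
+ // response: googleapi.CheckResponse turns any non-2xx status into an
+ // error, which is then wrapped by gensupport.WrapError.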
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. +func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. 
+// Only applicable for hierarchical namespace buckets and if softDeleted is set +// to true. This parameter is optional, and is only required in the rare case +// when there are multiple soft-deleted objects with the same name and +// generation. +func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download methods. +func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, gensupport.WrapError(err) + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Any non-2xx status code is an error. 
Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsGetIamPolicyCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { + c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.getIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + retry *gensupport.RetryConfig + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +// +// - bucket: Name of the bucket in which to store the new object. Overrides the +// provided object metadata's bucket value, if any. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If set, sets +// the contentEncoding property of the final object to this value. Setting this +// parameter is equivalent to setting the contentEncoding metadata property. +// This can be useful when uploading an object with uploadType=media to +// indicate the encoding of the content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. 
+func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. +func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of the +// Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required when +// the object metadata is not otherwise provided. Overrides the object +// metadata's name value, if any. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. 
+// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk size +// may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize. The Content-Type header used in the upload +// request will be determined by sniffing the contents of r, unless a +// MediaOption generated by googleapi.ContentType is supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) + } + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be canceled +// with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType identifies the +// MIME media type of the upload, such as "image/png". If mediaType is "", it +// will be auto-detected. The provided ctx will supersede any context +// previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called after every +// chunk. It should be a low-latency function in order to not slow down the +// upload operation. This should only be called when using ResumableMedia (as +// opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// WithRetry causes the library to retry the initial request of the upload (for +// resumable uploads) or the entire upload (for multipart uploads) if a +// transient error occurs. This is contingent on ChunkSize being > 0 (so that +// the input data may be buffered). The backoff argument will be used +// to determine exponential backoff timing, and the errorFunc is used to +// determine which errors are considered retryable. By default, exponential +// backoff will be applied using gax defaults, and the following errors are +// retried: +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and +// io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() interface. +// +// - Wrapped versions of these errors. +func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { + c.retry = &gensupport.RetryConfig{ + Backoff: bo, + ShouldRetry: errorFunc, + } + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details.
+func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + req.GetBody = getBody + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.retry != nil { + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) + } + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
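+ // Illustrative caller-side sketch (not part of the generated API surface): a
+ // simple upload that only succeeds if the object does not already exist.
+ // "svc" is assumed to be a *Service from NewService, "ctx" a context.Context,
+ // and the bucket, object name, and content are hypothetical.
+ //
+ //	obj, err := svc.Objects.Insert("example-bucket", &Object{Name: "notes.txt"}).
+ //		Media(strings.NewReader("hello world"), googleapi.ContentType("text/plain")).
+ //		IfGenerationMatch(0).
+ //		Context(ctx).
+ //		Do()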
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + rx.Retry = c.retry + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +// +// - bucket: Name of the bucket in which to look for objects. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. items will contain only objects whose names, aside from +// the prefix, do not contain delimiter. Objects whose names, aside from the +// prefix, contain delimiter will have their name, truncated after the +// delimiter, returned in prefixes. Duplicate prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to objects +// whose names are lexicographically before endOffset. If startOffset is also +// set, the objects listed will have names between startOffset (inclusive) and +// endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeFoldersAsPrefixes sets the optional parameter +// "includeFoldersAsPrefixes": Only applicable if delimiter is set to '/'. If +// true, will also include folders and managed folders (besides objects) in the +// returned prefixes. +func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool) *ObjectsListCall { + c.urlParams_.Set("includeFoldersAsPrefixes", fmt.Sprint(includeFoldersAsPrefixes)) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in addition +// to prefixes. +func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MatchGlob sets the optional parameter "matchGlob": Filter results to objects +// and prefixes that match this glob pattern. 
+func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { + c.urlParams_.Set("matchGlob", matchGlob) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// plus prefixes to return in a single page of responses. As duplicate prefixes +// are omitted, fewer total results may be returned than requested. The service +// will use this parameter or 1,000 items, whichever is smaller. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to objects whose +// names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results to +// objects whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all versions +// of an object as distinct results. The default is false. For more +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
+func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
+func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its mode from +// unlocked to locked. +func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsPatchCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. 
+func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request, for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
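+ // Illustrative caller-side sketch (not part of the generated API surface):
+ // patching only the metadata fields set on the provided Object, guarded by a
+ // metageneration precondition. "svc", "ctx", and the names/values are
+ // hypothetical.
+ //
+ //	updated, err := svc.Objects.Patch("example-bucket", "notes.txt", &Object{
+ //		ContentType: "text/plain; charset=utf-8",
+ //	}).IfMetagenerationMatch(3).Context(ctx).Do()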
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsRestoreCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted object. +// +// - bucket: Name of the bucket in which the object resides. +// - generation: Selects a specific revision of this object. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Restore(bucket string, object string, generation int64) *ObjectsRestoreCall { + c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// CopySourceAcl sets the optional parameter "copySourceAcl": If true, copies +// the source object's ACL; otherwise, uses the bucket's default object ACL. +// The default is false. +func (c *ObjectsRestoreCall) CopySourceAcl(copySourceAcl bool) *ObjectsRestoreCall { + c.urlParams_.Set("copySourceAcl", fmt.Sprint(copySourceAcl)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's one live generation matches +// the given value. Setting to 0 makes the operation succeed only if there are +// no live versions of the object. +func (c *ObjectsRestoreCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether none of the object's live +// generations match the given value. If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object. +func (c *ObjectsRestoreCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's one live +// metageneration matches the given value. +func (c *ObjectsRestoreCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether none +// of the object's live metagenerations match the given value. 
+func (c *ObjectsRestoreCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. Only +// applicable for hierarchical namespace buckets. This parameter is optional, +// and is only required in the rare case when there are multiple soft-deleted +// objects with the same name and generation. +func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsRestoreCall) Fields(s ...googleapi.Field) *ObjectsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsRestoreCall) Context(ctx context.Context) *ObjectsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.restore" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...)
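+ // Illustrative caller-side sketch (not part of the generated API surface):
+ // restoring a soft-deleted generation previously discovered via
+ // Objects.List(bucket).SoftDeleted(true). "svc", "ctx", and the names and
+ // generation are hypothetical.
+ //
+ //	restored, err := svc.Objects.Restore("example-bucket", "notes.txt", 1234567890).
+ //		CopySourceAcl(true).
+ //		Context(ctx).
+ //		Do()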
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's name +// value, if any. For information about how to URL encode object names to be +// path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - sourceBucket: Name of the bucket in which to find the source object. +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter "destinationKmsKeyName": +// Resource name of the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that +// will be used to encrypt the object. Overrides the object metadata's +// kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls to the +// destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. 
+func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. 
+func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify this +// parameter - it is primarily in place to support testing. If specified the +// value must be an integral multiple of 1 MiB (1048576). Also, this only +// applies to requests where the source and destination span locations and/or +// storage classes. Finally, this value must not change across rewrite calls +// else you'll get an error that the rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl, unless the object resource specifies the acl +// property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this field +// (from the previous rewrite response) on each rewrite request after the first +// one, until the rewrite response 'done' flag is true. Calls that provide a +// rewriteToken can omit all other request fields, but if included those fields +// must match the values provided in the first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If present, +// selects a specific revision of the source object (as opposed to the latest +// version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ObjectsRewriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.rewrite" call. +// Any non-2xx status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsSetIamPolicyCall struct { + s *Service + bucket string + object string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.policy = policy + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. 
+func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.setIamPolicy" call. +// Any non-2xx status code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given object to see +// which, if any, are held by the caller. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - permissions: Permissions to test. 
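To make the object-level IAM calls concrete, here is a small sketch that checks which of a few permissions the caller actually holds on an object; the bucket, object, and permission names are placeholders. TestIamPermissions returns the subset of the requested permissions that are granted.

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Ask the service which of these permissions the caller holds on the object.
	resp, err := svc.Objects.TestIamPermissions(
		"example-bucket", "path/to/object.txt",
		[]string{"storage.objects.get", "storage.objects.delete"},
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", resp.Permissions) // subset of the requested permissions
}
```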
+func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.testIamPermissions" call. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, selects a +// specific revision of this object (as opposed to the latest version, the +// default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the object's current generation matches the +// given value. Setting to 0 makes the operation succeed only if there are no +// live versions of the object. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the object's current generation +// does not match the given value. If no live object exists, the precondition +// fails. Setting to 0 makes the operation succeed only if there is a live +// version of the object. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the object's current +// metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// object's current metageneration does not match the given value. 
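The generation and metageneration preconditions above are what make metadata updates safe against concurrent writers: read the object, then update only if its metageneration is unchanged. A minimal sketch with placeholder names:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Read current metadata to learn the metageneration.
	obj, err := svc.Objects.Get("example-bucket", "report.csv").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Update custom metadata only if nobody else changed the object's metadata
	// in the meantime; otherwise the precondition fails with HTTP 412.
	// Note: Update replaces the writable metadata wholesale; Objects.Patch is
	// the merging variant.
	_, err = svc.Objects.Update("example-bucket", "report.csv", &storage.Object{
		Metadata: map[string]string{"reviewed": "true"},
	}).IfMetagenerationMatch(obj.Metageneration).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
}
```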
+func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its mode from +// unlocked to locked. +func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsUpdateCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and project +// +// team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project team +// +// owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get READER +// +// access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +// +// - bucket: Name of the bucket in which to look for objects. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in a +// directory-like mode. items will contain only objects whose names, aside from +// the prefix, do not contain delimiter. Objects whose names, aside from the +// prefix, contain delimiter will have their name, truncated after the +// delimiter, returned in prefixes. Duplicate prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to objects +// whose names are lexicographically before endOffset. If startOffset is also +// set, the objects listed will have names between startOffset (inclusive) and +// endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in addition +// to prefixes. +func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// plus prefixes to return in a single page of responses. 
As duplicate prefixes +// are omitted, fewer total results may be returned than requested. The service +// will use this parameter or 1,000 items, whichever is smaller. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to objects whose +// names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results to +// objects whose names are lexicographically equal to or after startOffset. If +// endOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all versions +// of an object as distinct results. The default is false. For more +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsWatchAllCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.watchAll" call. +// Any non-2xx status code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type OperationsCancelCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. The +// server makes a best effort to cancel the operation, but success is not +// guaranteed. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Cancel(bucket string, operationId string) *OperationsCancelCall { + c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.cancel" call. +func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type OperationsGetCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Get(bucket string, operationId string) *OperationsGetCall { + c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *OperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. 
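The operations calls above target the bucket-scoped long-running operations API. The usual pattern is to poll Get until the operation reports done (Cancel is best-effort, as noted). A naive sketch; the bucket name and operation ID are placeholders, and real code would back off rather than sleep a fixed interval.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const bucket, opID = "example-bucket", "operation-id-from-an-earlier-call"
	for {
		op, err := svc.Operations.Get(bucket, opID).Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		if op.Done {
			fmt.Println("operation finished:", op.Name)
			return
		}
		time.Sleep(5 * time.Second) // fixed delay for brevity
	}
}
```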
+func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type OperationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the request. +// +// - bucket: Name of the bucket in which to look for operations. +func (r *OperationsService) List(bucket string) *OperationsListCall { + c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Filter sets the optional parameter "filter": A filter to narrow down results +// to a preferred subset. The filtering language is documented in more detail +// in AIP-160 (https://google.aip.dev/160). +func (c *OperationsListCall) Filter(filter string) *OperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of items to +// return in a single page of responses. Fewer total results may be returned +// than requested. The service uses this parameter or 100 items, whichever is +// smaller. +func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *OperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunningListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsHmacKeysCreateCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new HMAC key for the specified service account. +// +// - projectId: Project ID owning the service account. +// - serviceAccountEmail: Email address of the service account. +func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail string) *ProjectsHmacKeysCreateCall { + c := &ProjectsHmacKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. 
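The Pages helper above hides the pageToken plumbing: it calls Do in a loop, hands each page to the callback, and stops when NextPageToken is empty or the callback returns an error. A short sketch listing operations on a placeholder bucket:

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.Operations.List("example-bucket").PageSize(100)
	err = call.Pages(ctx, func(page *storage.GoogleLongrunningListOperationsResponse) error {
		for _, op := range page.Operations {
			fmt.Println(op.Name, "done:", op.Done)
		}
		return nil // returning an error stops the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}
```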
+func (c *ProjectsHmacKeysCreateCall) UserProject(userProject string) *ProjectsHmacKeysCreateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsHmacKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsHmacKeysCreateCall) Context(ctx context.Context) *ProjectsHmacKeysCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsHmacKeysCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.create" call. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKey.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &HmacKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsHmacKeysDeleteCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an HMAC key. +// +// - accessId: Name of the HMAC key to be deleted. +// - projectId: Project ID owning the requested key. +func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *ProjectsHmacKeysDeleteCall { + c := &ProjectsHmacKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. 
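One practical note on the HMAC key Create call above: the key's secret is returned only in the creation response, so it has to be captured right away. A sketch with a placeholder project and service-account email:

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	key, err := svc.Projects.HmacKeys.Create(
		"example-project", "sa-name@example-project.iam.gserviceaccount.com",
	).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// The secret is only available in this response; store it somewhere safe.
	fmt.Println("access id:", key.Metadata.AccessId)
	fmt.Println("secret:   ", key.Secret)
}
```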
+func (c *ProjectsHmacKeysDeleteCall) UserProject(userProject string) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsHmacKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsHmacKeysDeleteCall) Context(ctx context.Context) *ProjectsHmacKeysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.delete" call. +func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + +type ProjectsHmacKeysGetCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an HMAC key's metadata +// +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested key. +func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { + c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *ProjectsHmacKeysGetCall) UserProject(userProject string) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsHmacKeysGetCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
+func (c *ProjectsHmacKeysGetCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsHmacKeysGetCall) Context(ctx context.Context) *ProjectsHmacKeysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsHmacKeysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsHmacKeysListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of HMAC keys matching the criteria. +// +// - projectId: Name of the project in which to look for HMAC keys. +func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCall { + c := &ProjectsHmacKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number of items +// to return in a single page of responses. The service uses this parameter or +// 250 items, whichever is smaller. The max number of items per page will also +// be limited by the number of distinct service accounts in the response. If +// the number of service accounts in a single response is too high, the page +// will truncated and a next page token will be returned. 
+func (c *ProjectsHmacKeysListCall) MaxResults(maxResults int64) *ProjectsHmacKeysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A previously-returned +// page token representing part of the larger set of results to view. +func (c *ProjectsHmacKeysListCall) PageToken(pageToken string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ServiceAccountEmail sets the optional parameter "serviceAccountEmail": If +// present, only keys for the given service account are returned. +func (c *ProjectsHmacKeysListCall) ServiceAccountEmail(serviceAccountEmail string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) + return c +} + +// ShowDeletedKeys sets the optional parameter "showDeletedKeys": Whether or +// not to show keys in the DELETED state. +func (c *ProjectsHmacKeysListCall) ShowDeletedKeys(showDeletedKeys bool) *ProjectsHmacKeysListCall { + c.urlParams_.Set("showDeletedKeys", fmt.Sprint(showDeletedKeys)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *ProjectsHmacKeysListCall) UserProject(userProject string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsHmacKeysListCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsHmacKeysListCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsHmacKeysListCall) Context(ctx context.Context) *ProjectsHmacKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsHmacKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.list" call. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeysMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &HmacKeysMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMetadata) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +type ProjectsHmacKeysUpdateCall struct { + s *Service + projectId string + accessId string + hmackeymetadata *HmacKeyMetadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the state of an HMAC key. See the HMAC Key resource +// descriptor +// (https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) +// for valid states. +// +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated key. +func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { + c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + c.hmackeymetadata = hmackeymetadata + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *ProjectsHmacKeysUpdateCall) UserProject(userProject string) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsHmacKeysUpdateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsHmacKeysUpdateCall) Context(ctx context.Context) *ProjectsHmacKeysUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.update" call. +// Any non-2xx status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} + +type ProjectsServiceAccountGetCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get the email address of this project's Google Cloud Storage service +// account. +// +// - projectId: Project ID. +func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { + c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. +func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. 
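The Update and Delete calls combine into the usual retirement flow for an HMAC key: a key must first be moved to the INACTIVE state via Update before Delete will succeed. A sketch with placeholder project and access IDs:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	const project, accessID = "example-project", "GOOG1EXAMPLEACCESSID"

	// Step 1: deactivate the key; state changes go through Update.
	_, err = svc.Projects.HmacKeys.Update(project, accessID, &storage.HmacKeyMetadata{
		State: "INACTIVE",
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: delete it; this only succeeds for INACTIVE keys.
	if err := svc.Projects.HmacKeys.Delete(project, accessID).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}
```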
+func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.serviceAccount.get" call. +// Any non-2xx status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 000000000..652b8eba5 --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,48 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transport + +import ( + "context" + "net/http" + + "golang.org/x/oauth2/google" + "google.golang.org/grpc" + + "google.golang.org/api/internal" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. 
+func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/doc.go b/vendor/google.golang.org/api/transport/doc.go new file mode 100644 index 000000000..7143abee4 --- /dev/null +++ b/vendor/google.golang.org/api/transport/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 000000000..ff3539d89 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,534 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package grpc supports network connections to GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "context" + "errors" + "log" + "net" + "os" + "strings" + "sync" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/grpctransport" + "cloud.google.com/go/auth/oauth2adapt" + "cloud.google.com/go/compute/metadata" + "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "golang.org/x/oauth2" + "golang.org/x/time/rate" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/grpc" + grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/stats" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" +) + +// Check env to disable DirectPath traffic. 
+const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" + +// Check env to decide if using google-c2p resolver for DirectPath traffic. +const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" + +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + +// Log rate limiter +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + +// Assign to var for unit test replacement +var dialContext = grpc.DialContext + +// Assign to var for unit test replacement +var dialContextNewAuth = grpctransport.Dial + +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: If 4226 has been fixed in opentelemetry-go-contrib, replace this +// singleton with inline usage for simplicity. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool.Conn(), nil + } + if o.IsNewAuthLibraryEnabled() { + pool, err := dialPoolNewAuth(ctx, true, 1, o) + if err != nil { + return nil, err + } + return pool.Connection(), nil + } + // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize) + // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it. + // + // Connection pooling is only done via DialPool. + return dial(ctx, false, o) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.IsNewAuthLibraryEnabled() { + pool, err := dialPoolNewAuth(ctx, false, 1, o) + if err != nil { + return nil, err + } + return pool.Connection(), nil + } + return dial(ctx, true, o) +} + +// DialPool returns a pool of GRPC connections for the given service. +// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer. +// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed. +// The context and options are shared between each Conn in the pool. +// The pool size is configured using the WithGRPCConnectionPool option. +// +// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287. 
+func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool, nil + } + + if o.IsNewAuthLibraryEnabled() { + if o.GRPCConn != nil { + return &singleConnPool{o.GRPCConn}, nil + } + pool, err := dialPoolNewAuth(ctx, true, o.GRPCConnPoolSize, o) + if err != nil { + return nil, err + } + return &poolAdapter{pool}, nil + } + + poolSize := o.GRPCConnPoolSize + if o.GRPCConn != nil { + // WithGRPCConn is technically incompatible with WithGRPCConnectionPool. + // Always assume pool size is 1 when a grpc.ClientConn is explicitly used. + poolSize = 1 + } + o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to. + + if poolSize == 0 || poolSize == 1 { + // Fast path for common case for a connection pool with a single connection. + conn, err := dial(ctx, false, o) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + + pool := &roundRobinConnPool{} + for i := 0; i < poolSize; i++ { + conn, err := dial(ctx, false, o) + if err != nil { + defer pool.Close() // NOTE: error from Close is ignored. + return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +// dialPoolNewAuth is an adapter to call new auth library. +func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *internal.DialSettings) (grpctransport.GRPCClientConnPool, error) { + // honor options if set + var creds *auth.Credentials + if ds.InternalCredentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials) + } else if ds.Credentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials) + } else if ds.AuthCredentials != nil { + creds = ds.AuthCredentials + } else if ds.TokenSource != nil { + credOpts := &auth.CredentialsOptions{ + TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource), + } + if ds.QuotaProject != "" { + credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return ds.QuotaProject, nil + }) + } + creds = auth.NewCredentials(credOpts) + } + + var skipValidation bool + // If our clients explicitly setup the credential skip validation as it is + // assumed correct + if ds.SkipValidation || ds.InternalCredentials != nil { + skipValidation = true + } + + var aud string + if len(ds.Audiences) > 0 { + aud = ds.Audiences[0] + } + metadata := map[string]string{} + if ds.QuotaProject != "" { + metadata["X-goog-user-project"] = ds.QuotaProject + } + if ds.RequestReason != "" { + metadata["X-goog-request-reason"] = ds.RequestReason + } + + // Defaults for older clients that don't set this value yet + defaultEndpointTemplate := ds.DefaultEndpointTemplate + if defaultEndpointTemplate == "" { + defaultEndpointTemplate = ds.DefaultEndpoint + } + + pool, err := dialContextNewAuth(ctx, secure, &grpctransport.Options{ + DisableTelemetry: ds.TelemetryDisabled, + DisableAuthentication: ds.NoAuth, + Endpoint: ds.Endpoint, + Metadata: metadata, + GRPCDialOpts: prepareDialOptsNewAuth(ds), + PoolSize: poolSize, + Credentials: creds, + APIKey: ds.APIKey, + DetectOpts: &credentials.DetectOptions{ + Scopes: ds.Scopes, + Audience: aud, + CredentialsFile: ds.CredentialsFile, + CredentialsJSON: ds.CredentialsJSON, + }, + InternalOptions: &grpctransport.InternalOptions{ + EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, + EnableDirectPath: 
ds.EnableDirectPath, + EnableDirectPathXds: ds.EnableDirectPathXds, + EnableJWTWithScope: ds.EnableJwtWithScope, + DefaultAudience: ds.DefaultAudience, + DefaultEndpointTemplate: defaultEndpointTemplate, + DefaultMTLSEndpoint: ds.DefaultMTLSEndpoint, + DefaultScopes: ds.DefaultScopes, + SkipValidation: skipValidation, + }, + UniverseDomain: ds.UniverseDomain, + }) + return pool, err +} + +func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption { + var opts []grpc.DialOption + if ds.UserAgent != "" { + opts = append(opts, grpc.WithUserAgent(ds.UserAgent)) + } + + return append(opts, ds.GRPCDialOpts...) +} + +func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + transportCreds, endpoint, err := internal.GetGRPCTransportConfigAndEndpoint(o) + if err != nil { + return nil, err + } + + if insecure { + transportCreds = grpcinsecure.NewCredentials() + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { + if o.APIKey != "" { + grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcAPIKey{ + apiKey: o.APIKey, + requestReason: o.RequestReason, + })) + } else { + creds, err := internal.Creds(ctx, o) + if err != nil { + return nil, err + } + grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, + quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), + requestReason: o.RequestReason, + })) + // Attempt Direct Path: + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds.TokenSource, o) + }) + if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions( + grpcgoogle.DefaultCredentialsOptions{ + PerRPCCreds: oauth.TokenSource{TokenSource: creds.TokenSource}, + })), + } + if timeoutDialerOption != nil { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + // Check if google-c2p resolver is enabled for DirectPath + if isDirectPathXdsUsed(o) { + // google-c2p resolver target must not have a port number + if addr, _, err := net.SplitHostPort(endpoint); err == nil { + endpoint = "google-c2p:///" + addr + } else { + endpoint = "google-c2p:///" + endpoint + } + } else { + if !strings.HasPrefix(endpoint, "dns:///") { + endpoint = "dns:///" + endpoint + } + grpcOpts = append(grpcOpts, + // For now all DirectPath go clients will be using the following lb config, but in future + // when different services need different configs, then we should change this to a + // per-service config. + grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. 
+ } + } + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts, o) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + + return dialContext(ctx, endpoint, grpcOpts...) +} + +func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} + +func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} + +// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. +type grpcTokenSource struct { + oauth.TokenSource + + // Additional metadata attached as headers. + quotaProject string + requestReason string +} + +// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. +func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( + map[string]string, error) { + metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) + if err != nil { + return nil, err + } + + // Attach system parameter + if ts.quotaProject != "" { + metadata["X-goog-user-project"] = ts.quotaProject + } + if ts.requestReason != "" { + metadata["X-goog-request-reason"] = ts.requestReason + } + return metadata, nil +} + +// grpcAPIKey supplies PerRPCCredentials from an API Key. +type grpcAPIKey struct { + apiKey string + + // Additional metadata attached as headers. + requestReason string +} + +// GetRequestMetadata gets the request metadata as a map from a grpcAPIKey. +func (ts grpcAPIKey) GetRequestMetadata(ctx context.Context, uri ...string) ( + map[string]string, error) { + metadata := map[string]string{ + "X-goog-api-key": ts.apiKey, + } + if ts.requestReason != "" { + metadata["X-goog-request-reason"] = ts.requestReason + } + return metadata, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. 
+func (ts grpcAPIKey) RequireTransportSecurity() bool { + return true +} + +func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool { + if !o.EnableDirectPath { + return false + } + if !checkDirectPathEndPoint(endpoint) { + return false + } + if strings.EqualFold(os.Getenv(disableDirectPath), "true") { + return false + } + return true +} + +func isDirectPathXdsUsed(o *internal.DialSettings) bool { + // Method 1: Enable DirectPath xDS by env; + if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { + return true + } + // Method 2: Enable DirectPath xDS by option; + if o.EnableDirectPathXds { + return true + } + return false + +} + +func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool { + if ts == nil { + return false + } + tok, err := ts.Token() + if err != nil { + return false + } + if tok == nil { + return false + } + if o.AllowNonDefaultServiceAccount { + return true + } + if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { + return false + } + return true +} + +func checkDirectPathEndPoint(endpoint string) bool { + // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + // + // TODO(cbro): once gRPC has introspectible options, check the user hasn't + // provided a custom dialer in gRPC options. + if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + if endpoint == "" { + return false + } + + return true +} + +func logDirectPathMisconfig(endpoint string, ts oauth2.TokenSource, o *internal.DialSettings) { + if isDirectPathXdsUsed(o) { + // Case 1: does not enable DirectPath + if !isDirectPathEnabled(endpoint, o) { + log.Println("WARNING: DirectPath is misconfigured. Please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: credential is not correctly set + if !isTokenSourceDirectPathCompatible(ts, o) { + log.Println("WARNING: DirectPath is misconfigured. Please make sure the token source is fetched from GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !metadata.OnGCE() { + log.Println("WARNING: DirectPath is misconfigured. DirectPath is only available in a GCE environment.") + } + } + } +} + +func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + + return &o, nil +} + +type connPoolOption struct{ ConnPool } + +// WithConnPool returns a ClientOption that specifies the ConnPool +// connection to use as the basis of communications. +// +// This is only to be used by Google client libraries internally, for example +// when creating a longrunning API client that shares the same connection pool +// as a service client. 
+func WithConnPool(p ConnPool) option.ClientOption { + return connPoolOption{p} +} + +func (o connPoolOption) Apply(s *internal.DialSettings) { + s.GRPCConnPool = o.ConnPool +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go new file mode 100644 index 000000000..507cd3ec6 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -0,0 +1,52 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 && linux +// +build go1.11,linux + +package grpc + +import ( + "context" + "net" + "syscall" + + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. + timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/google.golang.org/api/transport/grpc/pool.go b/vendor/google.golang.org/api/transport/grpc/pool.go new file mode 100644 index 000000000..c731293d8 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/pool.go @@ -0,0 +1,117 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package grpc + +import ( + "context" + "fmt" + "sync/atomic" + + "cloud.google.com/go/auth/grpctransport" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency. + +var _ ConnPool = &roundRobinConnPool{} +var _ ConnPool = &singleConnPool{} + +// singleConnPool is a special case for a single connection. 
+type singleConnPool struct { + *grpc.ClientConn +} + +func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn } +func (p *singleConnPool) Num() int { return 1 } + +type roundRobinConnPool struct { + conns []*grpc.ClientConn + + idx uint32 // access via sync/atomic +} + +func (p *roundRobinConnPool) Num() int { + return len(p.conns) +} + +func (p *roundRobinConnPool) Conn() *grpc.ClientConn { + i := atomic.AddUint32(&p.idx, 1) + return p.conns[i%uint32(len(p.conns))] +} + +func (p *roundRobinConnPool) Close() error { + var errs multiError + for _, conn := range p.conns { + if err := conn.Close(); err != nil { + errs = append(errs, err) + } + } + if len(errs) == 0 { + return nil + } + return errs +} + +func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { + return p.Conn().Invoke(ctx, method, args, reply, opts...) +} + +func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return p.Conn().NewStream(ctx, desc, method, opts...) +} + +// multiError represents errors from multiple conns in the group. +// +// TODO: figure out how and whether this is useful to export. End users should +// not be depending on the transport/grpc package directly, so there might need +// to be some service-specific multi-error type. +type multiError []error + +func (m multiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +type poolAdapter struct { + pool grpctransport.GRPCClientConnPool +} + +func (p *poolAdapter) Conn() *grpc.ClientConn { + return p.pool.Connection() +} + +func (p *poolAdapter) Num() int { + return p.pool.Len() +} + +func (p *poolAdapter) Close() error { + return p.pool.Close() +} + +func (p *poolAdapter) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error { + return p.pool.Invoke(ctx, method, args, reply, opts...) +} + +func (p *poolAdapter) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return p.pool.NewStream(ctx, desc, method, opts...) +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c0d8bf20b..d5b213e0f 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package http supports network connections to HTTP servers. 
// This package is not intended for use by end developers. Use the @@ -19,13 +9,23 @@ package http import ( "context" + "crypto/tls" "errors" + "net" "net/http" + "time" + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/httptransport" + "cloud.google.com/go/auth/oauth2adapt" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" "google.golang.org/api/transport/http/internal/propagation" ) @@ -38,15 +38,104 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } + clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) + if err != nil { + return nil, "", err + } // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? if settings.HTTPClient != nil { - return settings.HTTPClient, settings.Endpoint, nil + return settings.HTTPClient, endpoint, nil + } + + if settings.IsNewAuthLibraryEnabled() { + client, err := newClientNewAuth(ctx, nil, settings) + if err != nil { + return nil, "", err + } + return client, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) if err != nil { return nil, "", err } - return &http.Client{Transport: trans}, settings.Endpoint, nil + return &http.Client{Transport: trans}, endpoint, nil +} + +// newClientNewAuth is an adapter to call new auth library. +func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal.DialSettings) (*http.Client, error) { + // honor options if set + var creds *auth.Credentials + if ds.InternalCredentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.InternalCredentials) + } else if ds.Credentials != nil { + creds = oauth2adapt.AuthCredentialsFromOauth2Credentials(ds.Credentials) + } else if ds.AuthCredentials != nil { + creds = ds.AuthCredentials + } else if ds.TokenSource != nil { + credOpts := &auth.CredentialsOptions{ + TokenProvider: oauth2adapt.TokenProviderFromTokenSource(ds.TokenSource), + } + if ds.QuotaProject != "" { + credOpts.QuotaProjectIDProvider = auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return ds.QuotaProject, nil + }) + } + creds = auth.NewCredentials(credOpts) + } + + var skipValidation bool + // If our clients explicitly setup the credential skip validation as it is + // assumed correct + if ds.SkipValidation || ds.InternalCredentials != nil { + skipValidation = true + } + + // Defaults for older clients that don't set this value yet + defaultEndpointTemplate := ds.DefaultEndpointTemplate + if defaultEndpointTemplate == "" { + defaultEndpointTemplate = ds.DefaultEndpoint + } + + var aud string + if len(ds.Audiences) > 0 { + aud = ds.Audiences[0] + } + headers := http.Header{} + if ds.QuotaProject != "" { + headers.Set("X-goog-user-project", ds.QuotaProject) + } + if ds.RequestReason != "" { + headers.Set("X-goog-request-reason", ds.RequestReason) + } + client, err := httptransport.NewClient(&httptransport.Options{ + DisableTelemetry: ds.TelemetryDisabled, + DisableAuthentication: ds.NoAuth, + Headers: headers, + Endpoint: ds.Endpoint, + APIKey: ds.APIKey, + Credentials: creds, 
+ ClientCertProvider: ds.ClientCertSource, + BaseRoundTripper: base, + DetectOpts: &credentials.DetectOptions{ + Scopes: ds.Scopes, + Audience: aud, + CredentialsFile: ds.CredentialsFile, + CredentialsJSON: ds.CredentialsJSON, + }, + InternalOptions: &httptransport.InternalOptions{ + EnableJWTWithScope: ds.EnableJwtWithScope, + DefaultAudience: ds.DefaultAudience, + DefaultEndpointTemplate: defaultEndpointTemplate, + DefaultMTLSEndpoint: ds.DefaultMTLSEndpoint, + DefaultScopes: ds.DefaultScopes, + SkipValidation: skipValidation, + }, + UniverseDomain: ds.UniverseDomain, + }) + if err != nil { + return nil, err + } + return client, nil } // NewTransport creates an http.RoundTripper for use communicating with a Google @@ -59,22 +148,32 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl if settings.HTTPClient != nil { return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") } + if settings.IsNewAuthLibraryEnabled() { + client, err := newClientNewAuth(ctx, base, settings) + if err != nil { + return nil, err + } + return client.Transport, nil + } return newTransport(ctx, base, settings) } func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { - trans := base - trans = parameterTransport{ - base: trans, + paramTransport := ¶meterTransport{ + base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } - trans = addOCTransport(trans) + var trans http.RoundTripper = paramTransport + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, settings) + trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -84,9 +183,14 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) + ts := creds.TokenSource + if settings.ImpersonationConfig == nil && settings.TokenSource != nil { + ts = settings.TokenSource + } trans = &oauth2.Transport{ Base: trans, - Source: creds.TokenSource, + Source: ts, } } return trans, nil @@ -114,21 +218,20 @@ type parameterTransport struct { base http.RoundTripper } -func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { return nil, errors.New("transport: no Transport specified") } - if t.userAgent == "" { - return rt.RoundTrip(req) - } newReq := *req newReq.Header = make(http.Header) for k, vv := range req.Header { newReq.Header[k] = vv } - // TODO(cbro): append to existing User-Agent header? - newReq.Header.Set("User-Agent", t.userAgent) + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } // Attach system parameters into the header if t.quotaProject != "" { @@ -141,21 +244,88 @@ func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) return rt.RoundTrip(&newReq) } -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. 
-var appengineUrlfetchHook func(context.Context) http.RoundTripper +// defaultBaseTransport returns the base HTTP transport. It uses a default +// transport, taking most defaults from http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { + // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, + // which is increased due to reported performance issues under load in the + // GCS client. Transport.Clone is only available in Go 1.13 and up. + trans := clonedTransport(http.DefaultTransport) + if trans == nil { + trans = fallbackBaseTransport() + } + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + if dialTLSContext != nil { + // If DialTLSContext is set, TLSClientConfig wil be ignored + trans.DialTLSContext = dialTLSContext + } + + configureHTTP2(trans) + + return trans +} + +// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the +// transport. This allows broken idle connections to be pruned more quickly, +// preventing the client from attempting to re-use connections that will no +// longer work. +func configureHTTP2(trans *http.Transport) { + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } +} + +// fallbackBaseTransport is used in = Go 1.11). -func IsStandard() bool { - return internal.IsStandard() -} - -// IsFlex reports whether the App Engine app is running in the flexible environment. -func IsFlex() bool { - return internal.IsFlex() -} - -// IsAppEngine reports whether the App Engine app is running on App Engine, in either -// the standard or flexible environment. -func IsAppEngine() bool { - return internal.IsAppEngine() -} - -// IsSecondGen reports whether the App Engine app is running on the second generation -// runtimes (>= Go 1.11). -func IsSecondGen() bool { - return internal.IsSecondGen() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return internal.ReqContext(req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. 
-// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index f4b645aad..000000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e2..000000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f36..000000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). 
-func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. 
-func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). -func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index a6ec19e14..000000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. 
- <-flushed -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// jointContext joins two contexts in a superficial way. -// It takes values and timeouts from a base context, and only values from another context. -type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context -} - -func (c jointContext) Deadline() (time.Time, bool) { - return c.base.Deadline() -} - -func (c jointContext) Done() <-chan struct{} { - return c.base.Done() -} - -func (c jointContext) Err() error { - return c.base.Err() -} - -func (c jointContext) Value(key interface{}) interface{} { - if val := c.base.Value(key); val != nil { - return val - } - return c.valuesOnly.Value(key) -} - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return req.Context() -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - return jointContext{ - base: parent, - valuesOnly: req.Context(), - } -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation - if !IsSecondGen() { - log.Print(logLevelName[level] + ": " + s) - } -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) - c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index f0f40b2e3..000000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { - c := fromContext(ctx) - if c == nil { - return nil, errNotAppEngineContext - } - return c, nil -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. 
- if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index e0c0b214b..000000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "errors" - "os" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var errNotAppEngineContext = errors.New("not an App Engine context") - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. 
- f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} - -// SetTestEnv sets the env variables for testing background ticket in Flex. -func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07b..000000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". 
-func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 9a2ff77ab..000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} -func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} -} - -type AppIdentityServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func 
(*AppIdentityServiceError) ProtoMessage() {} -func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} -} -func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) -} -func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) -} -func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) -} -func (m *AppIdentityServiceError) XXX_Size() int { - return xxx_messageInfo_AppIdentityServiceError.Size(m) -} -func (m *AppIdentityServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} -func (*SignForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} -} -func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) -} -func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) -} -func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppRequest.Merge(dst, src) -} -func (m *SignForAppRequest) XXX_Size() int { - return xxx_messageInfo_SignForAppRequest.Size(m) -} -func (m *SignForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} -func (*SignForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} -} -func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) -} -func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) -} -func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppResponse.Merge(dst, src) -} -func (m *SignForAppResponse) XXX_Size() int { - return xxx_messageInfo_SignForAppResponse.Size(m) -} -func (m 
*SignForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} -func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} -} -func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) -} -func (m *GetPublicCertificateForAppRequest) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) -} -func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} -func (*PublicCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} -} -func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) -} -func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) -} -func (dst *PublicCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublicCertificate.Merge(dst, src) -} -func (m *PublicCertificate) XXX_Size() int { - return xxx_messageInfo_PublicCertificate.Size(m) -} -func (m *PublicCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_PublicCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type 
GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} -func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} -} -func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) -} -func (m *GetPublicCertificateForAppResponse) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) -} -func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} -func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} -} -func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) -} -func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) -} -func (m *GetServiceAccountNameRequest) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) -} -func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo - -type GetServiceAccountNameResponse struct { - 
ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} -func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} -} -func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) -} -func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) -} -func (m *GetServiceAccountNameResponse) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) -} -func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} -func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} -} -func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) -} -func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) -} -func (m *GetAccessTokenRequest) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenRequest.Size(m) -} -func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo - -func (m *GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != 
nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} -func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} -} -func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) -} -func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) -} -func (m *GetAccessTokenResponse) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenResponse.Size(m) -} -func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} -func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} -func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { - proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") - proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") - proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") - proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") - proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") - proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") - proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") - proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") - proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") - proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") - proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") - proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) -} - -var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ - // 676 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, - 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e, - 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, - 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, - 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, - 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, - 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 
0x27, 0x5c, 0x28, 0x8c, - 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, - 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, - 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, - 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, - 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, - 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, - 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, - 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, - 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, - 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, - 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, - 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, - 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, - 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, - 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, - 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, - 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, - 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, - 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, - 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, - 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, - 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, - 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, - 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, - 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, - 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, - 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, - 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, - 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, - 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, - 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, - 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, - 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, - 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, - 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, - 0xf3, 0x04, 0x00, 0x00, -} diff --git 
a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5b..000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index db4777e68..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/base/api_base.proto - -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} -func (*StringProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} -} -func (m *StringProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringProto.Unmarshal(m, b) -} -func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) -} -func (dst *StringProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringProto.Merge(dst, src) -} -func (m *StringProto) XXX_Size() int { - return xxx_messageInfo_StringProto.Size(m) -} -func (m *StringProto) XXX_DiscardUnknown() { - xxx_messageInfo_StringProto.DiscardUnknown(m) -} - -var xxx_messageInfo_StringProto proto.InternalMessageInfo - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} -func (*Integer32Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} -} -func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) -} -func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) -} -func (dst *Integer32Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer32Proto.Merge(dst, src) -} -func (m *Integer32Proto) XXX_Size() int { - return xxx_messageInfo_Integer32Proto.Size(m) -} -func (m *Integer32Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer32Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} -func (*Integer64Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} -} -func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) -} -func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) -} -func (dst *Integer64Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer64Proto.Merge(dst, src) -} -func (m *Integer64Proto) XXX_Size() int 
{ - return xxx_messageInfo_Integer64Proto.Size(m) -} -func (m *Integer64Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer64Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} -func (*BoolProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} -} -func (m *BoolProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolProto.Unmarshal(m, b) -} -func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) -} -func (dst *BoolProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolProto.Merge(dst, src) -} -func (m *BoolProto) XXX_Size() int { - return xxx_messageInfo_BoolProto.Size(m) -} -func (m *BoolProto) XXX_DiscardUnknown() { - xxx_messageInfo_BoolProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolProto proto.InternalMessageInfo - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} -func (*DoubleProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} -} -func (m *DoubleProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleProto.Unmarshal(m, b) -} -func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) -} -func (dst *DoubleProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleProto.Merge(dst, src) -} -func (m *DoubleProto) XXX_Size() int { - return xxx_messageInfo_DoubleProto.Size(m) -} -func (m *DoubleProto) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleProto proto.InternalMessageInfo - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} -func (*BytesProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} -} -func (m *BytesProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesProto.Unmarshal(m, b) -} -func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) -} -func (dst 
*BytesProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesProto.Merge(dst, src) -} -func (m *BytesProto) XXX_Size() int { - return xxx_messageInfo_BytesProto.Size(m) -} -func (m *BytesProto) XXX_DiscardUnknown() { - xxx_messageInfo_BytesProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesProto proto.InternalMessageInfo - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} -func (*VoidProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} -} -func (m *VoidProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VoidProto.Unmarshal(m, b) -} -func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) -} -func (dst *VoidProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoidProto.Merge(dst, src) -} -func (m *VoidProto) XXX_Size() int { - return xxx_messageInfo_VoidProto.Size(m) -} -func (m *VoidProto) XXX_DiscardUnknown() { - xxx_messageInfo_VoidProto.DiscardUnknown(m) -} - -var xxx_messageInfo_VoidProto proto.InternalMessageInfo - -func init() { - proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") - proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") - proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") - proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") - proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") - proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") - proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) -} - -var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, - 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, - 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6, - 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62, - 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71, - 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe, - 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1, - 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71, - 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d, - 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff, - 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d, - 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba, - 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto 
b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3ca..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 2fb748289..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,4367 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto - -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 
18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} -func (Property_Meaning) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} -func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} -func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} -func (Index_Property_Direction) EnumDescriptor() 
([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} -func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} -func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} -func (Query_Hint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: 
"EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} -func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} -func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} -func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0} -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} -func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} -} - -type BeginTransactionRequest_TransactionMode int32 - -const ( - BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 - BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 - BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 -) - -var BeginTransactionRequest_TransactionMode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READ_ONLY", - 2: "READ_WRITE", -} -var BeginTransactionRequest_TransactionMode_value = map[string]int32{ - "UNKNOWN": 0, - "READ_ONLY": 1, - "READ_WRITE": 2, -} - -func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { - p := new(BeginTransactionRequest_TransactionMode) - *p = x - return p -} -func (x BeginTransactionRequest_TransactionMode) String() string { - return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) -} -func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") - if err != nil { - return err - } - *x = BeginTransactionRequest_TransactionMode(value) - return nil -} -func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} -} - -type Action struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} -func (*Action) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} -} -func (m *Action) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Action.Unmarshal(m, b) -} -func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Action.Marshal(b, m, deterministic) -} -func (dst *Action) XXX_Merge(src proto.Message) { - xxx_messageInfo_Action.Merge(dst, src) -} -func (m *Action) XXX_Size() int { - return xxx_messageInfo_Action.Size(m) -} -func (m *Action) XXX_DiscardUnknown() { - xxx_messageInfo_Action.DiscardUnknown(m) -} - -var xxx_messageInfo_Action proto.InternalMessageInfo - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool 
`protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} -func (*PropertyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} -} -func (m *PropertyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue.Unmarshal(m, b) -} -func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue.Merge(dst, src) -} -func (m *PropertyValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue.Size(m) -} -func (m *PropertyValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue proto.InternalMessageInfo - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} -func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} -} -func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) -} -func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) -} -func (m *PropertyValue_PointValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_PointValue.Size(m) -} -func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} -func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} -} -func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) -} -func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) -} -func (m *PropertyValue_UserValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_UserValue.Size(m) -} -func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Pathelement 
[]*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} -func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} -} -func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) -} -func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} -func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} -func (*Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} -} -func (m *Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Property.Unmarshal(m, b) -} -func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Property.Marshal(b, m, deterministic) -} -func (dst *Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Property.Merge(dst, src) -} -func (m *Property) XXX_Size() int { - return xxx_messageInfo_Property.Size(m) -} -func (m *Property) XXX_DiscardUnknown() { - xxx_messageInfo_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Property proto.InternalMessageInfo - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - 
if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} -} -func (m *Path) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path.Unmarshal(m, b) -} -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path.Marshal(b, m, deterministic) -} -func (dst *Path) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path.Merge(dst, src) -} -func (m *Path) XXX_Size() int { - return xxx_messageInfo_Path.Size(m) -} -func (m *Path) XXX_DiscardUnknown() { - xxx_messageInfo_Path.DiscardUnknown(m) -} - -var xxx_messageInfo_Path proto.InternalMessageInfo - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} -func (*Path_Element) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} -} -func (m *Path_Element) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path_Element.Unmarshal(m, b) -} -func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) -} -func (dst *Path_Element) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path_Element.Merge(dst, src) -} -func (m *Path_Element) XXX_Size() int { - return xxx_messageInfo_Path_Element.Size(m) -} -func (m *Path_Element) XXX_DiscardUnknown() { - xxx_messageInfo_Path_Element.DiscardUnknown(m) -} - -var xxx_messageInfo_Path_Element proto.InternalMessageInfo - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} -func (*Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} -} -func (m *Reference) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_Reference.Unmarshal(m, b) -} -func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reference.Marshal(b, m, deterministic) -} -func (dst *Reference) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reference.Merge(dst, src) -} -func (m *Reference) XXX_Size() int { - return xxx_messageInfo_Reference.Size(m) -} -func (m *Reference) XXX_DiscardUnknown() { - xxx_messageInfo_Reference.DiscardUnknown(m) -} - -var xxx_messageInfo_Reference proto.InternalMessageInfo - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_User.Unmarshal(m, b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_User.Marshal(b, m, deterministic) -} -func (dst *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(dst, src) -} -func (m *User) XXX_Size() int { - return xxx_messageInfo_User.Size(m) -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" 
json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} -func (*EntityProto) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} -} -func (m *EntityProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EntityProto.Unmarshal(m, b) -} -func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) -} -func (dst *EntityProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EntityProto.Merge(dst, src) -} -func (m *EntityProto) XXX_Size() int { - return xxx_messageInfo_EntityProto.Size(m) -} -func (m *EntityProto) XXX_DiscardUnknown() { - xxx_messageInfo_EntityProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EntityProto proto.InternalMessageInfo - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} -func (*CompositeProperty) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} -} -func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) -} -func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) -} -func (dst *CompositeProperty) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeProperty.Merge(dst, src) -} -func (m *CompositeProperty) XXX_Size() int { - return xxx_messageInfo_CompositeProperty.Size(m) -} -func (m *CompositeProperty) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeProperty.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { 
- return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} -func (*Index) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} -} -func (m *Index) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index.Unmarshal(m, b) -} -func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index.Marshal(b, m, deterministic) -} -func (dst *Index) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index.Merge(dst, src) -} -func (m *Index) XXX_Size() int { - return xxx_messageInfo_Index.Size(m) -} -func (m *Index) XXX_DiscardUnknown() { - xxx_messageInfo_Index.DiscardUnknown(m) -} - -var xxx_messageInfo_Index proto.InternalMessageInfo - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} -func (*Index_Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} -} -func (m *Index_Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index_Property.Unmarshal(m, b) -} -func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) -} -func (dst *Index_Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index_Property.Merge(dst, src) -} -func (m *Index_Property) XXX_Size() int { - return xxx_messageInfo_Index_Property.Size(m) -} -func (m *Index_Property) XXX_DiscardUnknown() { - xxx_messageInfo_Index_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Index_Property proto.InternalMessageInfo - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} -func (*CompositeIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} -} -func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) -} -func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) -} -func (dst *CompositeIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndex.Merge(dst, src) -} -func (m *CompositeIndex) XXX_Size() int { - return xxx_messageInfo_CompositeIndex.Size(m) -} -func (m *CompositeIndex) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} -func (*IndexPostfix) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} -} -func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) -} -func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix.Merge(dst, src) -} -func (m *IndexPostfix) XXX_Size() int { - return xxx_messageInfo_IndexPostfix.Size(m) -} -func (m *IndexPostfix) XXX_DiscardUnknown() { - 
xxx_messageInfo_IndexPostfix.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} -func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0} -} -func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b) -} -func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src) -} -func (m *IndexPostfix_IndexValue) XXX_Size() int { - return xxx_messageInfo_IndexPostfix_IndexValue.Size(m) -} -func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() { - xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} -func (*IndexPosition) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11} -} -func (m *IndexPosition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPosition.Unmarshal(m, b) -} -func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic) -} -func (dst *IndexPosition) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPosition.Merge(dst, src) -} -func (m *IndexPosition) XXX_Size() int { - return xxx_messageInfo_IndexPosition.Size(m) -} -func (m *IndexPosition) XXX_DiscardUnknown() { - xxx_messageInfo_IndexPosition.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPosition proto.InternalMessageInfo - -const Default_IndexPosition_Before bool = true - -func (m 
*IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Snapshot.Unmarshal(m, b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) -} -func (dst *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(dst, src) -} -func (m *Snapshot) XXX_Size() int { - return xxx_messageInfo_Snapshot.Size(m) -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} -func (*InternalHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13} -} -func (m *InternalHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InternalHeader.Unmarshal(m, b) -} -func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic) -} -func (dst *InternalHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalHeader.Merge(dst, src) -} -func (m *InternalHeader) XXX_Size() int { - return xxx_messageInfo_InternalHeader.Size(m) -} -func (m *InternalHeader) XXX_DiscardUnknown() { - xxx_messageInfo_InternalHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_InternalHeader proto.InternalMessageInfo - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} -func (*Transaction) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14} -} -func (m *Transaction) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_Transaction.Unmarshal(m, b) -} -func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) -} -func (dst *Transaction) XXX_Merge(src proto.Message) { - xxx_messageInfo_Transaction.Merge(dst, src) -} -func (m *Transaction) XXX_Size() int { - return xxx_messageInfo_Transaction.Size(m) -} -func (m *Transaction) XXX_DiscardUnknown() { - xxx_messageInfo_Transaction.DiscardUnknown(m) -} - -var xxx_messageInfo_Transaction proto.InternalMessageInfo - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"` - Distinct *bool 
`protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15} -} -func (m *Query) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query.Unmarshal(m, b) -} -func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query.Marshal(b, m, deterministic) -} -func (dst *Query) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query.Merge(dst, src) -} -func (m *Query) XXX_Size() int { - return xxx_messageInfo_Query.Size(m) -} -func (m *Query) XXX_DiscardUnknown() { - xxx_messageInfo_Query.DiscardUnknown(m) -} - -var xxx_messageInfo_Query proto.InternalMessageInfo - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false -const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != 
nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} -func (*Query_Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} -func (m *Query_Filter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query_Filter.Unmarshal(m, b) -} -func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic) -} -func (dst *Query_Filter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query_Filter.Merge(dst, src) -} -func (m *Query_Filter) XXX_Size() int { - return xxx_messageInfo_Query_Filter.Size(m) -} -func (m *Query_Filter) XXX_DiscardUnknown() { - xxx_messageInfo_Query_Filter.DiscardUnknown(m) -} - -var xxx_messageInfo_Query_Filter proto.InternalMessageInfo - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} -func (*Query_Order) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1} -} -func (m 
*Query_Order) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query_Order.Unmarshal(m, b) -} -func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic) -} -func (dst *Query_Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query_Order.Merge(dst, src) -} -func (m *Query_Order) XXX_Size() int { - return xxx_messageInfo_Query_Order.Size(m) -} -func (m *Query_Order) XXX_DiscardUnknown() { - xxx_messageInfo_Query_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Query_Order proto.InternalMessageInfo - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} -func (*CompiledQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16} -} -func (m *CompiledQuery) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery.Unmarshal(m, b) -} -func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery.Merge(dst, src) -} -func (m *CompiledQuery) XXX_Size() int { - return xxx_messageInfo_CompiledQuery.Size(m) -} -func (m *CompiledQuery) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) 
GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"` - EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} -func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0} -} -func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b) -} -func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src) -} -func (m *CompiledQuery_PrimaryScan) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m) -} -func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != 
nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} -func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1} -} -func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b) -} -func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src) -} -func (m *CompiledQuery_MergeJoinScan) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m) -} -func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} -func (*CompiledQuery_EntityFilter) 
Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2} -} -func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b) -} -func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src) -} -func (m *CompiledQuery_EntityFilter) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m) -} -func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} -func (*CompiledCursor) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17} -} -func (m *CompiledCursor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor.Unmarshal(m, b) -} -func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor.Merge(dst, src) -} -func (m *CompiledCursor) XXX_Size() int { - return xxx_messageInfo_CompiledCursor.Size(m) -} -func (m *CompiledCursor) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} -func 
(*CompiledCursor_Position) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0} -} -func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b) -} -func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor_Position.Merge(dst, src) -} -func (m *CompiledCursor_Position) XXX_Size() int { - return xxx_messageInfo_CompiledCursor_Position.Size(m) -} -func (m *CompiledCursor_Position) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} -func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0} -} -func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b) -} -func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src) -} -func (m *CompiledCursor_Position_IndexValue) XXX_Size() int { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m) -} -func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string 
`protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} -func (*Cursor) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18} -} -func (m *Cursor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cursor.Unmarshal(m, b) -} -func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cursor.Marshal(b, m, deterministic) -} -func (dst *Cursor) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cursor.Merge(dst, src) -} -func (m *Cursor) XXX_Size() int { - return xxx_messageInfo_Cursor.Size(m) -} -func (m *Cursor) XXX_DiscardUnknown() { - xxx_messageInfo_Cursor.DiscardUnknown(m) -} - -var xxx_messageInfo_Cursor proto.InternalMessageInfo - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19} -} -func (m *Error) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Error.Unmarshal(m, b) -} -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Error.Marshal(b, m, deterministic) -} -func (dst *Error) XXX_Merge(src proto.Message) { - xxx_messageInfo_Error.Merge(dst, src) -} -func (m *Error) XXX_Size() int { - return xxx_messageInfo_Error.Size(m) -} -func (m *Error) XXX_DiscardUnknown() { - xxx_messageInfo_Error.DiscardUnknown(m) -} - -var xxx_messageInfo_Error proto.InternalMessageInfo - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) ProtoMessage() {} -func (*Cost) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20} -} -func (m *Cost) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_Cost.Unmarshal(m, b) -} -func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cost.Marshal(b, m, deterministic) -} -func (dst *Cost) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cost.Merge(dst, src) -} -func (m *Cost) XXX_Size() int { - return xxx_messageInfo_Cost.Size(m) -} -func (m *Cost) XXX_DiscardUnknown() { - xxx_messageInfo_Cost.DiscardUnknown(m) -} - -var xxx_messageInfo_Cost proto.InternalMessageInfo - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} -func (*Cost_CommitCost) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0} -} -func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b) -} -func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic) -} -func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cost_CommitCost.Merge(dst, src) -} -func (m *Cost_CommitCost) XXX_Size() int { - return xxx_messageInfo_Cost_CommitCost.Size(m) -} -func (m *Cost_CommitCost) XXX_DiscardUnknown() { - xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m) -} - -var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" 
json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21} -} -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (dst *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(dst, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22} -} -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (dst *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(dst, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m 
*GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} -func (*GetResponse_Entity) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0} -} -func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b) -} -func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic) -} -func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse_Entity.Merge(dst, src) -} -func (m *GetResponse_Entity) XXX_Size() int { - return xxx_messageInfo_GetResponse_Entity.Size(m) -} -func (m *GetResponse_Entity) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23} -} -func (m *PutRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PutRequest.Unmarshal(m, b) -} -func (m *PutRequest) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) -} -func (dst *PutRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutRequest.Merge(dst, src) -} -func (m *PutRequest) XXX_Size() int { - return xxx_messageInfo_PutRequest.Size(m) -} -func (m *PutRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PutRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PutRequest proto.InternalMessageInfo - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24} -} -func (m *PutResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PutResponse.Unmarshal(m, b) -} -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) -} -func (dst *PutResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutResponse.Merge(dst, src) -} -func (m *PutResponse) XXX_Size() int { - return xxx_messageInfo_PutResponse.Size(m) -} -func (m *PutResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PutResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PutResponse proto.InternalMessageInfo - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key 
[]*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} -func (*TouchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25} -} -func (m *TouchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TouchRequest.Unmarshal(m, b) -} -func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic) -} -func (dst *TouchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TouchRequest.Merge(dst, src) -} -func (m *TouchRequest) XXX_Size() int { - return xxx_messageInfo_TouchRequest.Size(m) -} -func (m *TouchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TouchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TouchRequest proto.InternalMessageInfo - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} -func (*TouchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26} -} -func (m *TouchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TouchResponse.Unmarshal(m, b) -} -func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic) -} -func (dst *TouchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TouchResponse.Merge(dst, src) -} -func (m *TouchResponse) XXX_Size() int { - return xxx_messageInfo_TouchResponse.Size(m) -} -func (m *TouchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TouchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TouchResponse proto.InternalMessageInfo - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction 
`protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(dst, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(dst, src) -} -func (m 
*DeleteResponse) XXX_Size() int { - return xxx_messageInfo_DeleteResponse.Size(m) -} -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} -func (*NextRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29} -} -func (m *NextRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NextRequest.Unmarshal(m, b) -} -func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic) -} -func (dst *NextRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NextRequest.Merge(dst, src) -} -func (m *NextRequest) XXX_Size() int { - return xxx_messageInfo_NextRequest.Size(m) -} -func (m *NextRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NextRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NextRequest proto.InternalMessageInfo - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor 
`protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} -func (*QueryResult) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30} -} -func (m *QueryResult) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryResult.Unmarshal(m, b) -} -func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) -} -func (dst *QueryResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResult.Merge(dst, src) -} -func (m *QueryResult) XXX_Size() int { - return xxx_messageInfo_QueryResult.Size(m) -} -func (m *QueryResult) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResult.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResult proto.InternalMessageInfo - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} -func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31} -} -func (m *AllocateIdsRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b) -} -func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic) -} -func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocateIdsRequest.Merge(dst, src) -} -func (m *AllocateIdsRequest) XXX_Size() int { - return xxx_messageInfo_AllocateIdsRequest.Size(m) -} -func (m *AllocateIdsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} -func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32} -} -func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b) -} -func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic) -} -func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocateIdsResponse.Merge(dst, src) -} -func (m *AllocateIdsResponse) XXX_Size() int { - return xxx_messageInfo_AllocateIdsResponse.Size(m) -} -func (m *AllocateIdsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} -func (*CompositeIndices) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33} -} -func (m *CompositeIndices) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndices.Unmarshal(m, b) -} -func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic) -} -func (dst *CompositeIndices) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndices.Merge(dst, src) -} -func (m *CompositeIndices) XXX_Size() int { - return xxx_messageInfo_CompositeIndices.Size(m) -} -func (m *CompositeIndices) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndices.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} -func (*AddActionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34} -} -func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b) -} -func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic) -} -func (dst *AddActionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddActionsRequest.Merge(dst, src) -} -func (m *AddActionsRequest) XXX_Size() int { - return xxx_messageInfo_AddActionsRequest.Size(m) -} -func (m *AddActionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} -func (*AddActionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35} -} -func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b) -} -func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic) -} -func (dst *AddActionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddActionsResponse.Merge(dst, src) -} -func (m *AddActionsResponse) XXX_Size() int { - return xxx_messageInfo_AddActionsResponse.Size(m) -} -func (m *AddActionsResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` - DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` - Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` - PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} -} -func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) -} -func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) -} -func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) -} -func (m *BeginTransactionRequest) XXX_Size() int { - return xxx_messageInfo_BeginTransactionRequest.Size(m) -} -func (m *BeginTransactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false -const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -func (m *BeginTransactionRequest) GetDatabaseId() string { - if m != nil && m.DatabaseId != nil { - return *m.DatabaseId - } - return "" -} - -func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { - if m != nil && m.Mode != nil { - return *m.Mode - } - return Default_BeginTransactionRequest_Mode -} - -func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { - if m != nil { - return m.PreviousTransaction - } - return nil -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} -func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} -} -func (m *CommitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse.Unmarshal(m, b) -} -func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) -} -func (dst *CommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse.Merge(dst, src) -} -func (m *CommitResponse) XXX_Size() int { - return xxx_messageInfo_CommitResponse.Size(m) -} -func (m *CommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse proto.InternalMessageInfo - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} -func (*CommitResponse_Version) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} -} -func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) -} -func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) -} -func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse_Version.Merge(dst, src) -} -func (m *CommitResponse_Version) XXX_Size() int { - return xxx_messageInfo_CommitResponse_Version.Size(m) -} -func (m *CommitResponse_Version) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { - proto.RegisterType((*Action)(nil), "appengine.Action") - proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") - proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") - proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") - proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") - proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") - proto.RegisterType((*Property)(nil), "appengine.Property") - proto.RegisterType((*Path)(nil), 
"appengine.Path") - proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") - proto.RegisterType((*Reference)(nil), "appengine.Reference") - proto.RegisterType((*User)(nil), "appengine.User") - proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") - proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") - proto.RegisterType((*Index)(nil), "appengine.Index") - proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") - proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") - proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") - proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") - proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") - proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") - proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") - proto.RegisterType((*Transaction)(nil), "appengine.Transaction") - proto.RegisterType((*Query)(nil), "appengine.Query") - proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") - proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") - proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") - proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") - proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") - proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") - proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") - proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") - proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") - proto.RegisterType((*Cursor)(nil), "appengine.Cursor") - proto.RegisterType((*Error)(nil), "appengine.Error") - proto.RegisterType((*Cost)(nil), "appengine.Cost") - proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") - proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") - proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") - proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") - proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") - proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") - proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") - proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") - proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") - proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") - proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") - proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") - proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") - proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") - proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") - proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") - proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") - proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") - proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") -} - -func init() { - 
proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) -} - -var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ - // 4156 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, - 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, - 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, - 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, - 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, - 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, - 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, - 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, - 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, - 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, - 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, - 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, - 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, - 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, - 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, - 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, - 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, - 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, - 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, - 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, - 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, - 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, - 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, - 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, - 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, - 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, - 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, - 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, - 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, - 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, - 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, - 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, - 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, - 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 
0x57, 0x5c, 0x87, 0x63, 0x50, - 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, - 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, - 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, - 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, - 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, - 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, - 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, - 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, - 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, - 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, - 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, - 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, - 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, - 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, - 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, - 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, - 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, - 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, - 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9, - 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, - 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, - 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, - 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, - 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, - 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, - 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, - 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, - 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, - 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, - 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, - 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, - 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, - 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, - 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, - 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, - 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 
0xe0, - 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, - 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, - 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, - 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, - 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, - 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, - 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, - 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, - 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, - 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, - 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, - 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, - 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, - 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, - 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, - 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, - 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, - 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, - 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, - 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, - 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, - 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, - 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, - 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, - 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, - 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, - 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, - 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, - 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, - 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, - 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, - 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, - 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, - 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, - 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, - 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, - 0xa9, 0xd0, 0x5a, 
0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, - 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, - 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, - 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, - 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, - 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, - 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, - 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, - 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b, - 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, - 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, - 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, - 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, - 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, - 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, - 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, - 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, - 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, - 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, - 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, - 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, - 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, - 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, - 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, - 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, - 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, - 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, - 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, - 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, - 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, - 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, - 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, - 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, - 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, - 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, - 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, - 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 
0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, - 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, - 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, - 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, - 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, - 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, - 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, - 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, - 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, - 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, - 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, - 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, - 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, - 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, - 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, - 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, - 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, - 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, - 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, - 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, - 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, - 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, - 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, - 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, - 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, - 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, - 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, - 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, - 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, - 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, - 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, - 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, - 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, - 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, - 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, - 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, - 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 
0x4a, 0xf5, 0x81, 0x69, 0x99, - 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, - 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, - 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, - 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, - 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, - 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, - 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, - 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, - 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, - 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, - 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, - 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, - 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, - 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, - 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, - 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, - 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, - 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, - 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c, - 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, - 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, - 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, - 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, - 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, - 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, - 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, - 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, - 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, - 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, - 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, - 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, - 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, - 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, - 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, - 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, - 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 
0x96, - 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, - 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, - 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, - 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, - 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, - 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, - 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, - 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, - 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, - 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, - 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, - 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, - 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, - 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, - 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, - 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, - 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, - 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, - 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, - 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, - 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, - 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, - 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, - 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, - 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, - 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, - 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, - 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, - 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, - 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, - 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, - 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, - 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, - 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, - 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, - 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, - 0x0f, 0x7c, 0x87, 
0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, - 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, - 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, - 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, - 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, - 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, - 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, - 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index 497b4d9a9..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,551 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; 
- repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional 
Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - 
- repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; - optional string database_id = 4; - - enum TransactionMode { - UNKNOWN = 0; - READ_ONLY = 1; - READ_WRITE = 2; - } - optional TransactionMode mode = 5 [default = UNKNOWN]; - - optional Transaction previous_transaction = 7; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index 9b4134e42..000000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "os" - - netcontext "golang.org/x/net/context" -) - -var ( - // This is set to true in identity_classic.go, which is behind the appengine build tag. - // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not - // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a - // first-gen runtime. See IsStandard below for the second-gen check. - appengineStandard bool - - // This is set to true in identity_flex.go, which is behind the appenginevm build tag. - appengineFlex bool -) - -// AppID is the implementation of the wrapper function of the same name in -// ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsStandard() bool { - // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not - // second-gen (>= Go 1.11). - return appengineStandard || IsSecondGen() -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsSecondGen() bool { - // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. - return os.Getenv("GAE_ENV") == "standard" -} - -// IsFlex is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. 
-func IsFlex() bool { - return appengineFlex -} - -// IsAppEngine is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsAppEngine() bool { - return IsStandard() || IsFlex() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index 4e979f45e..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func init() { - appengineStandard = true -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.DefaultVersionHostname(c) -} - -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func RequestID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.RequestID(c) -} - -func ModuleName(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.ModuleName(c) -} -func VersionID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.VersionID(c) -} - -func fullyQualifiedAppID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return c.FullyQualifiedAppID() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go deleted file mode 100644 index d5e2e7b5e..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 Google LLC. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appenginevm - -package internal - -func init() { - appengineFlex = true -} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index 5d8067263..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "log" - "net/http" - "os" - "strings" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { - return dc - } - // If the header isn't set, read zone from the metadata service. - // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] - zone, err := getMetadata("instance/zone") - if err != nil { - log.Printf("Datacenter: %v", err) - return "" - } - parts := strings.Split(string(zone), "/") - if len(parts) == 0 { - return "" - } - return parts[len(parts)-1] -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - if s := os.Getenv("GAE_ENV"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - if s := os.Getenv("GAE_SERVICE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - if s := os.Getenv("GAE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. - if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { - return appID - } - if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { - return project - } - return string(mustGetMetadata("instance/attributes/gae_project")) -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - if s := os.Getenv("GAE_APPLICATION"); s != "" { - return s - } - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea3980..000000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. - Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 8545ac4ad..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,1313 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google.golang.org/appengine/internal/log/log_service.proto - -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} -func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} -} - -type LogServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} -func (*LogServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0} -} -func (m *LogServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogServiceError.Unmarshal(m, b) -} -func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) -} -func (dst *LogServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogServiceError.Merge(dst, src) -} -func (m *LogServiceError) XXX_Size() int { - return xxx_messageInfo_LogServiceError.Size(m) -} -func (m *LogServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_LogServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_LogServiceError proto.InternalMessageInfo - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} -func (*UserAppLogLine) Descriptor() ([]byte, []int) { - 
return fileDescriptor_log_service_f054fd4b5012319d, []int{1} -} -func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) -} -func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) -} -func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogLine.Merge(dst, src) -} -func (m *UserAppLogLine) XXX_Size() int { - return xxx_messageInfo_UserAppLogLine.Size(m) -} -func (m *UserAppLogLine) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} -func (*UserAppLogGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{2} -} -func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) -} -func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) -} -func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogGroup.Merge(dst, src) -} -func (m *UserAppLogGroup) XXX_Size() int { - return xxx_messageInfo_UserAppLogGroup.Size(m) -} -func (m *UserAppLogGroup) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} -func (*FlushRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{3} -} -func (m *FlushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FlushRequest.Unmarshal(m, b) -} -func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) -} -func (dst *FlushRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlushRequest.Merge(dst, src) -} -func (m *FlushRequest) XXX_Size() int { - return xxx_messageInfo_FlushRequest.Size(m) -} -func (m *FlushRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FlushRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FlushRequest 
proto.InternalMessageInfo - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} -func (*SetStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{4} -} -func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) -} -func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) -} -func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetStatusRequest.Merge(dst, src) -} -func (m *SetStatusRequest) XXX_Size() int { - return xxx_messageInfo_SetStatusRequest.Size(m) -} -func (m *SetStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} -func (*LogOffset) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{5} -} -func (m *LogOffset) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogOffset.Unmarshal(m, b) -} -func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) -} -func (dst *LogOffset) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogOffset.Merge(dst, src) -} -func (m *LogOffset) XXX_Size() int { - return xxx_messageInfo_LogOffset.Size(m) -} -func (m *LogOffset) XXX_DiscardUnknown() { - xxx_messageInfo_LogOffset.DiscardUnknown(m) -} - -var xxx_messageInfo_LogOffset proto.InternalMessageInfo - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} -func (*LogLine) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{6} -} -func (m *LogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogLine.Unmarshal(m, b) -} -func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_LogLine.Marshal(b, m, deterministic) -} -func (dst *LogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogLine.Merge(dst, src) -} -func (m *LogLine) XXX_Size() int { - return xxx_messageInfo_LogLine.Size(m) -} -func (m *LogLine) XXX_DiscardUnknown() { - xxx_messageInfo_LogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_LogLine proto.InternalMessageInfo - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` - PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" 
json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} -func (*RequestLog) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{7} -} -func (m *RequestLog) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RequestLog.Unmarshal(m, b) -} -func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) -} -func (dst *RequestLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestLog.Merge(dst, src) -} -func (m *RequestLog) XXX_Size() int { - return xxx_messageInfo_RequestLog.Size(m) -} -func (m *RequestLog) XXX_DiscardUnknown() { - xxx_messageInfo_RequestLog.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestLog proto.InternalMessageInfo - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return 
*m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil && m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type 
LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} -func (*LogModuleVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{8} -} -func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) -} -func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) -} -func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogModuleVersion.Merge(dst, src) -} -func (m *LogModuleVersion) XXX_Size() int { - return xxx_messageInfo_LogModuleVersion.Size(m) -} -func (m *LogModuleVersion) XXX_DiscardUnknown() { - xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` - IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} -func (*LogReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{9} -} -func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) -} -func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) -} -func (dst *LogReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadRequest.Merge(dst, src) -} -func (m *LogReadRequest) XXX_Size() int { - return xxx_messageInfo_LogReadRequest.Size(m) -} -func (m *LogReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return "" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != 
nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} -func (*LogReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{10} -} -func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) -} -func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) -} -func (dst *LogReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadResponse.Merge(dst, src) -} -func (m *LogReadResponse) XXX_Size() int { - return xxx_messageInfo_LogReadResponse.Size(m) -} -func (m *LogReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} -func (*LogUsageRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{11} -} -func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) -} -func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) -} -func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRecord.Merge(dst, src) -} -func (m *LogUsageRecord) XXX_Size() int { - return 
xxx_messageInfo_LogUsageRecord.Size(m) -} -func (m *LogUsageRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} -func (*LogUsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{12} -} -func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) -} -func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) -} -func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRequest.Merge(dst, src) -} -func (m *LogUsageRequest) XXX_Size() int { - return xxx_messageInfo_LogUsageRequest.Size(m) -} -func (m *LogUsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) 
GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} -func (*LogUsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{13} -} -func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) -} -func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) -} -func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageResponse.Merge(dst, src) -} -func (m *LogUsageResponse) XXX_Size() int { - return xxx_messageInfo_LogUsageResponse.Size(m) -} -func (m *LogUsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { - proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") - proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") - proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") - proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") - proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") - proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") - proto.RegisterType((*LogLine)(nil), "appengine.LogLine") - proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") - proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") - proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") - proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") - proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") - proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") - proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) -} - -var fileDescriptor_log_service_f054fd4b5012319d = []byte{ - // 1553 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, - 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 
0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, - 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, - 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, - 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, - 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, - 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, - 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, - 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, - 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, - 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, - 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, - 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, - 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, - 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, - 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, - 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, - 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, - 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, - 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, - 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, - 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, - 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, - 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, - 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, - 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, - 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, - 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, - 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, - 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, - 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, - 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, - 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, - 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, - 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, - 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, - 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, - 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 
0x85, 0x26, 0xd4, 0x60, 0xbf, - 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, - 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, - 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, - 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, - 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, - 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, - 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, - 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, - 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, - 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, - 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, - 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, - 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, - 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, - 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, - 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, - 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, - 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, - 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, - 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, - 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, - 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, - 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, - 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, - 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, - 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, - 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, - 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, - 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, - 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, - 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, - 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, - 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, - 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, - 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, - 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 
0x83, - 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, - 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, - 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, - 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, - 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, - 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d, - 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, - 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, - 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, - 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, - 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, - 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, - 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, - 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, - 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, - 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, - 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, - 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, - 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, - 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, - 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, - 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, - 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, - 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc475..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - 
required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go deleted file mode 100644 index 1e765312f..000000000 --- a/vendor/google.golang.org/appengine/internal/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine_internal" -) - -func Main() { - MainPath = "" - appengine_internal.Main() -} diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go deleted file mode 100644 index 357dce4dd..000000000 --- a/vendor/google.golang.org/appengine/internal/main_common.go +++ /dev/null @@ -1,7 +0,0 @@ -package internal - -// MainPath stores the file path of the main package. On App Engine Standard -// using Go version 1.9 and below, this will be unset. On App Engine Flex and -// App Engine Standard second-gen (Go 1.11 and above), this will be the -// filepath to package main. -var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index ddb79a333..000000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" -) - -func Main() { - MainPath = filepath.Dir(findMainPath()) - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -// Find the path to package main by looking at the root Caller. -func findMainPath() string { - pc := make([]uintptr, 100) - n := runtime.Callers(2, pc) - frames := runtime.CallersFrames(pc[:n]) - for { - frame, more := frames.Next() - // Tests won't have package main, instead they have testing.tRunner - if frame.Function == "main.main" || frame.Function == "testing.tRunner" { - return frame.File - } - if !more { - break - } - } - return "" -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index c4ba63bb4..000000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? 
-func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index ddfc0c04a..000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,786 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/modules/modules_service.proto - -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} -func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} -} - -type ModulesServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModulesServiceError) 
Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} -func (*ModulesServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} -} -func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) -} -func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) -} -func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModulesServiceError.Merge(dst, src) -} -func (m *ModulesServiceError) XXX_Size() int { - return xxx_messageInfo_ModulesServiceError.Size(m) -} -func (m *ModulesServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo - -type GetModulesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} -func (*GetModulesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} -} -func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) -} -func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) -} -func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesRequest.Merge(dst, src) -} -func (m *GetModulesRequest) XXX_Size() int { - return xxx_messageInfo_GetModulesRequest.Size(m) -} -func (m *GetModulesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} -func (*GetModulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} -} -func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) -} -func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) -} -func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesResponse.Merge(dst, src) -} -func (m *GetModulesResponse) XXX_Size() int { - return xxx_messageInfo_GetModulesResponse.Size(m) -} -func (m *GetModulesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { 
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} -func (*GetVersionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} -} -func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) -} -func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsRequest.Merge(dst, src) -} -func (m *GetVersionsRequest) XXX_Size() int { - return xxx_messageInfo_GetVersionsRequest.Size(m) -} -func (m *GetVersionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} -func (*GetVersionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} -} -func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) -} -func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) -} -func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsResponse.Merge(dst, src) -} -func (m *GetVersionsResponse) XXX_Size() int { - return xxx_messageInfo_GetVersionsResponse.Size(m) -} -func (m *GetVersionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} -func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} -} -func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) -} -func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) -} -func (m *GetDefaultVersionRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionRequest.Size(m) -} -func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} -func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} -} -func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) -} -func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) -} -func (m *GetDefaultVersionResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionResponse.Size(m) -} -func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} -func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} -} -func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) -} -func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) -} -func (m *GetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesRequest.Size(m) -} -func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesRequest 
proto.InternalMessageInfo - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} -func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} -} -func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) -} -func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) -} -func (m *GetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesResponse.Size(m) -} -func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} -func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} -} -func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) -} -func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) -} -func (m *SetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesRequest.Size(m) -} -func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m 
*SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} -func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} -} -func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) -} -func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) -} -func (m *SetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesResponse.Size(m) -} -func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo - -type StartModuleRequest struct { - Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} -func (*StartModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} -} -func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) -} -func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleRequest.Merge(dst, src) -} -func (m *StartModuleRequest) XXX_Size() int { - return xxx_messageInfo_StartModuleRequest.Size(m) -} -func (m *StartModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} -func (*StartModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} -} -func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) -} -func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleResponse.Merge(dst, src) -} -func (m *StartModuleResponse) XXX_Size() int { - return xxx_messageInfo_StartModuleResponse.Size(m) -} -func (m *StartModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} -func (*StopModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} -} -func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) -} -func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleRequest.Merge(dst, src) -} -func (m *StopModuleRequest) XXX_Size() int { - return xxx_messageInfo_StopModuleRequest.Size(m) -} -func (m *StopModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} -func (*StopModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} -} -func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) -} -func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleResponse.Merge(dst, src) -} -func (m *StopModuleResponse) XXX_Size() int { - return xxx_messageInfo_StopModuleResponse.Size(m) -} -func (m *StopModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" 
json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} -func (*GetHostnameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} -} -func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) -} -func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) -} -func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameRequest.Merge(dst, src) -} -func (m *GetHostnameRequest) XXX_Size() int { - return xxx_messageInfo_GetHostnameRequest.Size(m) -} -func (m *GetHostnameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} -func (*GetHostnameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} -} -func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) -} -func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) -} -func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameResponse.Merge(dst, src) -} -func (m *GetHostnameResponse) XXX_Size() int { - return xxx_messageInfo_GetHostnameResponse.Size(m) -} -func (m *GetHostnameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { - proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") - proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") - proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") - proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") - proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") - proto.RegisterType((*GetDefaultVersionRequest)(nil), 
"appengine.GetDefaultVersionRequest") - proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") - proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") - proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") - proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") - proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") - proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") - proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") - proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") - proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") - proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") - proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) -} - -var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, - 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, - 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, - 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, - 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, - 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, - 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, - 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, - 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, - 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, - 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, - 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, - 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, - 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, - 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, - 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, - 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, - 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, - 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, - 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, - 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, - 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, - 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, - 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 
0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, - 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, - 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, - 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, - 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f0065a..000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index 3b94cf0c6..000000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. 
- conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100644 index 2fdb546a6..000000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 8d782a38e..000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto - -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} -func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func 
(m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} -func (*ApplicationError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{1} -} -func (m *ApplicationError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplicationError.Unmarshal(m, b) -} -func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) -} -func (dst *ApplicationError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplicationError.Merge(dst, src) -} -func (m *ApplicationError) XXX_Size() int { - return xxx_messageInfo_ApplicationError.Size(m) -} -func (m *ApplicationError) XXX_DiscardUnknown() { - xxx_messageInfo_ApplicationError.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplicationError proto.InternalMessageInfo - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} -func (*RpcError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2} -} -func (m *RpcError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RpcError.Unmarshal(m, b) -} -func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) -} -func (dst *RpcError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RpcError.Merge(dst, src) -} -func (m *RpcError) XXX_Size() int { - return xxx_messageInfo_RpcError.Size(m) -} -func (m *RpcError) XXX_DiscardUnknown() { - xxx_messageInfo_RpcError.DiscardUnknown(m) -} - -var xxx_messageInfo_RpcError proto.InternalMessageInfo - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" 
json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{3} -} -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) -} -func (dst *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(dst, src) -} -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { - proto.RegisterType((*Request)(nil), "remote_api.Request") - proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") - proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") - proto.RegisterType((*Response)(nil), "remote_api.Response") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) -} - -var fileDescriptor_remote_api_1978114ec33a273d = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42, - 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, - 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, - 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, - 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, - 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, - 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, - 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, - 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, - 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, - 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, - 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 
0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, - 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, - 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, - 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, - 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, - 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, - 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, - 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, - 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, - 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, - 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, - 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, - 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, - 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, - 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, - 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, - 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, - 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, - 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, - 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, - 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, - 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4e..000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 9006ae653..000000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,115 +0,0 @@ -// 
Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { - if transactionFromContext(c) != nil { - return nil, errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if previousTransaction != nil { - req.PreviousTransaction = previousTransaction - } - if readOnly { - req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() - } else { - req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return nil, err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return &t.transaction, err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return &t.transaction, ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return &t.transaction, ErrConcurrentTransaction - } - } - return &t.transaction, err -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go deleted file mode 100644 index 5f727750a..000000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto - -package urlfetch - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type URLFetchServiceError_ErrorCode int32 - -const ( - URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 - URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 - URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 - URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 - URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 - URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 - URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 - URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 - URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 - URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 - URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 - URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 - URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 -) - -var URLFetchServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_URL", - 2: "FETCH_ERROR", - 3: "UNSPECIFIED_ERROR", - 4: "RESPONSE_TOO_LARGE", - 5: "DEADLINE_EXCEEDED", - 6: "SSL_CERTIFICATE_ERROR", - 7: "DNS_ERROR", - 8: "CLOSED", - 9: "INTERNAL_TRANSIENT_ERROR", - 10: "TOO_MANY_REDIRECTS", - 11: "MALFORMED_REPLY", - 12: "CONNECTION_ERROR", -} -var URLFetchServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_URL": 1, - "FETCH_ERROR": 2, - "UNSPECIFIED_ERROR": 3, - "RESPONSE_TOO_LARGE": 4, - "DEADLINE_EXCEEDED": 5, - "SSL_CERTIFICATE_ERROR": 6, - "DNS_ERROR": 7, - "CLOSED": 8, - "INTERNAL_TRANSIENT_ERROR": 9, - "TOO_MANY_REDIRECTS": 10, - "MALFORMED_REPLY": 11, - "CONNECTION_ERROR": 12, -} - -func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { - p := new(URLFetchServiceError_ErrorCode) - *p = x - return p -} -func (x URLFetchServiceError_ErrorCode) String() string { - return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) -} -func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") - if err != nil { - return err - } - *x = URLFetchServiceError_ErrorCode(value) - 
return nil -} -func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0} -} - -type URLFetchRequest_RequestMethod int32 - -const ( - URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 - URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 - URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 - URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 - URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 - URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 -) - -var URLFetchRequest_RequestMethod_name = map[int32]string{ - 1: "GET", - 2: "POST", - 3: "HEAD", - 4: "PUT", - 5: "DELETE", - 6: "PATCH", -} -var URLFetchRequest_RequestMethod_value = map[string]int32{ - "GET": 1, - "POST": 2, - "HEAD": 3, - "PUT": 4, - "DELETE": 5, - "PATCH": 6, -} - -func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { - p := new(URLFetchRequest_RequestMethod) - *p = x - return p -} -func (x URLFetchRequest_RequestMethod) String() string { - return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) -} -func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") - if err != nil { - return err - } - *x = URLFetchRequest_RequestMethod(value) - return nil -} -func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} - -type URLFetchServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } -func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } -func (*URLFetchServiceError) ProtoMessage() {} -func (*URLFetchServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0} -} -func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b) -} -func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic) -} -func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchServiceError.Merge(dst, src) -} -func (m *URLFetchServiceError) XXX_Size() int { - return xxx_messageInfo_URLFetchServiceError.Size(m) -} -func (m *URLFetchServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo - -type URLFetchRequest struct { - Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` - Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` - Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` - FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` - Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` - MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` 
- XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } -func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest) ProtoMessage() {} -func (*URLFetchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1} -} -func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b) -} -func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest.Merge(dst, src) -} -func (m *URLFetchRequest) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest.Size(m) -} -func (m *URLFetchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo - -const Default_URLFetchRequest_FollowRedirects bool = true -const Default_URLFetchRequest_MustValidateServerCertificate bool = true - -func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { - if m != nil && m.Method != nil { - return *m.Method - } - return URLFetchRequest_GET -} - -func (m *URLFetchRequest) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchRequest) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *URLFetchRequest) GetFollowRedirects() bool { - if m != nil && m.FollowRedirects != nil { - return *m.FollowRedirects - } - return Default_URLFetchRequest_FollowRedirects -} - -func (m *URLFetchRequest) GetDeadline() float64 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return 0 -} - -func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { - if m != nil && m.MustValidateServerCertificate != nil { - return *m.MustValidateServerCertificate - } - return Default_URLFetchRequest_MustValidateServerCertificate -} - -type URLFetchRequest_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } -func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest_Header) ProtoMessage() {} -func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} -func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b) -} -func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src) -} -func (m *URLFetchRequest_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest_Header.Size(m) -} -func (m *URLFetchRequest_Header) XXX_DiscardUnknown() { - 
xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo - -func (m *URLFetchRequest_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchRequest_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type URLFetchResponse struct { - Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` - StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` - Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` - ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` - ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` - FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` - ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` - ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` - ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } -func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse) ProtoMessage() {} -func (*URLFetchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2} -} -func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b) -} -func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse.Merge(dst, src) -} -func (m *URLFetchResponse) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse.Size(m) -} -func (m *URLFetchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo - -const Default_URLFetchResponse_ContentWasTruncated bool = false -const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 -const Default_URLFetchResponse_ApiBytesSent int64 = 0 -const Default_URLFetchResponse_ApiBytesReceived int64 = 0 - -func (m *URLFetchResponse) GetContent() []byte { - if m != nil { - return m.Content - } - return nil -} - -func (m *URLFetchResponse) GetStatusCode() int32 { - if m != nil && m.StatusCode != nil { - return *m.StatusCode - } - return 0 -} - -func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchResponse) GetContentWasTruncated() bool { - if m != nil && m.ContentWasTruncated != nil { - return *m.ContentWasTruncated - } - return Default_URLFetchResponse_ContentWasTruncated -} - -func (m *URLFetchResponse) GetExternalBytesSent() int64 { - if m != nil && m.ExternalBytesSent != nil { - return *m.ExternalBytesSent - } - return 0 -} - -func (m 
*URLFetchResponse) GetExternalBytesReceived() int64 { - if m != nil && m.ExternalBytesReceived != nil { - return *m.ExternalBytesReceived - } - return 0 -} - -func (m *URLFetchResponse) GetFinalUrl() string { - if m != nil && m.FinalUrl != nil { - return *m.FinalUrl - } - return "" -} - -func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { - if m != nil && m.ApiCpuMilliseconds != nil { - return *m.ApiCpuMilliseconds - } - return Default_URLFetchResponse_ApiCpuMilliseconds -} - -func (m *URLFetchResponse) GetApiBytesSent() int64 { - if m != nil && m.ApiBytesSent != nil { - return *m.ApiBytesSent - } - return Default_URLFetchResponse_ApiBytesSent -} - -func (m *URLFetchResponse) GetApiBytesReceived() int64 { - if m != nil && m.ApiBytesReceived != nil { - return *m.ApiBytesReceived - } - return Default_URLFetchResponse_ApiBytesReceived -} - -type URLFetchResponse_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } -func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse_Header) ProtoMessage() {} -func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0} -} -func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b) -} -func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src) -} -func (m *URLFetchResponse_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse_Header.Size(m) -} -func (m *URLFetchResponse_Header) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo - -func (m *URLFetchResponse_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchResponse_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -func init() { - proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError") - proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest") - proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header") - proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse") - proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced) -} - -var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{ - // 770 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54, - 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29, - 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e, - 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 
0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d, - 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b, - 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27, - 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92, - 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7, - 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17, - 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec, - 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c, - 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a, - 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01, - 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14, - 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f, - 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07, - 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87, - 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a, - 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a, - 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37, - 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc, - 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde, - 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71, - 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17, - 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea, - 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4, - 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6, - 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96, - 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d, - 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d, - 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb, - 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad, - 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86, - 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20, - 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e, - 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f, - 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21, - 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c, - 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b, - 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 
0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6, - 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02, - 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b, - 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9, - 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e, - 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97, - 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3, - 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8, - 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05, - 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto deleted file mode 100644 index f695edf6a..000000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "urlfetch"; - -package appengine; - -message URLFetchServiceError { - enum ErrorCode { - OK = 0; - INVALID_URL = 1; - FETCH_ERROR = 2; - UNSPECIFIED_ERROR = 3; - RESPONSE_TOO_LARGE = 4; - DEADLINE_EXCEEDED = 5; - SSL_CERTIFICATE_ERROR = 6; - DNS_ERROR = 7; - CLOSED = 8; - INTERNAL_TRANSIENT_ERROR = 9; - TOO_MANY_REDIRECTS = 10; - MALFORMED_REPLY = 11; - CONNECTION_ERROR = 12; - } -} - -message URLFetchRequest { - enum RequestMethod { - GET = 1; - POST = 2; - HEAD = 3; - PUT = 4; - DELETE = 5; - PATCH = 6; - } - required RequestMethod Method = 1; - required string Url = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bytes Payload = 6 [ctype=CORD]; - - optional bool FollowRedirects = 7 [default=true]; - - optional double Deadline = 8; - - optional bool MustValidateServerCertificate = 9 [default=true]; -} - -message URLFetchResponse { - optional bytes Content = 1; - required int32 StatusCode = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bool ContentWasTruncated = 6 [default=false]; - optional int64 ExternalBytesSent = 7; - optional int64 ExternalBytesReceived = 8; - - optional string FinalUrl = 9; - - optional int64 ApiCpuMilliseconds = 10 [default=0]; - optional int64 ApiBytesSent = 11 [default=0]; - optional int64 ApiBytesReceived = 12 [default=0]; -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca08..000000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. 
-func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a992..000000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46..000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045..000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go deleted file mode 100644 index 6ffe1e6d9..000000000 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package urlfetch provides an http.RoundTripper implementation -// for fetching URLs via App Engine's urlfetch service. 
-package urlfetch // import "google.golang.org/appengine/urlfetch" - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/urlfetch" -) - -// Transport is an implementation of http.RoundTripper for -// App Engine. Users should generally create an http.Client using -// this transport and use the Client rather than using this transport -// directly. -type Transport struct { - Context context.Context - - // Controls whether the application checks the validity of SSL certificates - // over HTTPS connections. A value of false (the default) instructs the - // application to send a request to the server only if the certificate is - // valid and signed by a trusted certificate authority (CA), and also - // includes a hostname that matches the certificate. A value of true - // instructs the application to perform no certificate validation. - AllowInvalidServerCertificate bool -} - -// Verify statically that *Transport implements http.RoundTripper. -var _ http.RoundTripper = (*Transport)(nil) - -// Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. -// -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. -func Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &Transport{ - Context: ctx, - }, - } -} - -type bodyReader struct { - content []byte - truncated bool - closed bool -} - -// ErrTruncatedBody is the error returned after the final Read() from a -// response's Body if the body has been truncated by App Engine's proxy. -var ErrTruncatedBody = errors.New("urlfetch: truncated body") - -func statusCodeToText(code int) string { - if t := http.StatusText(code); t != "" { - return t - } - return strconv.Itoa(code) -} - -func (br *bodyReader) Read(p []byte) (n int, err error) { - if br.closed { - if br.truncated { - return 0, ErrTruncatedBody - } - return 0, io.EOF - } - n = copy(p, br.content) - if n > 0 { - br.content = br.content[n:] - return - } - if br.truncated { - br.closed = true - return 0, ErrTruncatedBody - } - return 0, io.EOF -} - -func (br *bodyReader) Close() error { - br.closed = true - br.content = nil - return nil -} - -// A map of the URL Fetch-accepted methods that take a request body. -var methodAcceptsRequestBody = map[string]bool{ - "POST": true, - "PUT": true, - "PATCH": true, -} - -// urlString returns a valid string given a URL. This function is necessary because -// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. -// See http://code.google.com/p/go/issues/detail?id=4860. -func urlString(u *url.URL) string { - if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { - return u.String() - } - aux := *u - aux.Opaque = "//" + aux.Host + aux.Opaque - return aux.String() -} - -// RoundTrip issues a single HTTP request and returns its response. Per the -// http.RoundTripper interface, RoundTrip only returns an error if there -// was an unsupported request or the URL Fetch proxy fails. -// Note that HTTP response codes such as 5xx, 403, 404, etc are not -// errors as far as the transport is concerned and will be returned -// with err set to nil. 
-func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { - methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] - if !ok { - return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) - } - - method := pb.URLFetchRequest_RequestMethod(methNum) - - freq := &pb.URLFetchRequest{ - Method: &method, - Url: proto.String(urlString(req.URL)), - FollowRedirects: proto.Bool(false), // http.Client's responsibility - MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), - } - if deadline, ok := t.Context.Deadline(); ok { - freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) - } - - for k, vals := range req.Header { - for _, val := range vals { - freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ - Key: proto.String(k), - Value: proto.String(val), - }) - } - } - if methodAcceptsRequestBody[req.Method] && req.Body != nil { - // Avoid a []byte copy if req.Body has a Bytes method. - switch b := req.Body.(type) { - case interface { - Bytes() []byte - }: - freq.Payload = b.Bytes() - default: - freq.Payload, err = ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - } - } - - fres := &pb.URLFetchResponse{} - if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { - return nil, err - } - - res = &http.Response{} - res.StatusCode = int(*fres.StatusCode) - res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) - res.Header = make(http.Header) - res.Request = req - - // Faked: - res.ProtoMajor = 1 - res.ProtoMinor = 1 - res.Proto = "HTTP/1.1" - res.Close = true - - for _, h := range fres.Header { - hkey := http.CanonicalHeaderKey(*h.Key) - hval := *h.Value - if hkey == "Content-Length" { - // Will get filled in below for all but HEAD requests. - if req.Method == "HEAD" { - res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) - } - continue - } - res.Header.Add(hkey, hval) - } - - if req.Method != "HEAD" { - res.ContentLength = int64(len(fres.Content)) - } - - truncated := fres.GetContentWasTruncated() - res.Body = &bodyReader{content: fres.Content, truncated: truncated} - return -} - -func init() { - internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) - internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 000000000..8b462f3df --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,119 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/annotations.proto + +package annotations + +import ( + reflect "reflect" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See `HttpRule`. 
+ // + // optional google.api.HttpRule http = 72295728; + E_Http = &file_google_api_annotations_proto_extTypes[0] +) + +var File_google_api_annotations_proto protoreflect.FileDescriptor + +var file_google_api_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, + 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_google_api_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*HttpRule)(nil), // 1: google.api.HttpRule +} +var file_google_api_annotations_proto_depIdxs = []int32{ + 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + 
file_google_api_annotations_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 000000000..aa69fb4d5 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,1940 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/client.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + api "google.golang.org/genproto/googleapis/api" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. + ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 + // Shopping Org. + ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5 + // Geo Org. + ClientLibraryOrganization_GEO ClientLibraryOrganization = 6 + // Generative AI - https://developers.generativeai.google + ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7 +) + +// Enum value maps for ClientLibraryOrganization. 
+var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + 5: "SHOPPING", + 6: "GEO", + 7: "GENERATIVE_AI", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + "SHOPPING": 5, + "GEO": 6, + "GENERATIVE_AI": 7, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + // + // Deprecated: Do not use. 
+ ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Do not use. +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. This is the full protobuf + // package for the API, ending in the version element. + // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. 
+ DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a *public* URI where users can report issues. 
Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. + DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` + // Optional link to proto reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"` + // Optional link to REST reference documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rest + RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +func (x *Publishing) GetProtoReferenceDocumentationUri() string { + if x != nil { + return x.ProtoReferenceDocumentationUri + } + return "" +} + +func (x *Publishing) GetRestReferenceDocumentationUri() string { + if x != nil { + return x.RestReferenceDocumentationUri + } + return "" +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. +func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map from original service names to renamed versions. + // This is used when the default generated types + // would cause a naming conflict. (Neither name is + // fully-qualified.) + // Example: Subscriber to SubscriberServiceApi. + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Map from full resource types to the effective short name + // for the resource. This is used when otherwise resource + // named from different services would cause naming collisions. + // Example entry: + // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // List of full resource types to ignore during generation. + // This is typically used for API-specific Location resources, + // which should be handled by the generator as if they were actually + // the common Location resources. + // Example entry: "documentai.googleapis.com/Location" + IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"` + // Namespaces which must be aliased in snippets due to + // a known (but non-generator-predictable) naming collision + ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"` + // Method signatures (in the form "service.method(signature)") + // which are provided separately, so shouldn't be generated. + // Snippets *calling* these methods are still generated, however. 
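+ // Example entry (hypothetical, shown only to illustrate the expected form):
+ //   "google.example.v1.ExampleService.CreateExample(name)"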
+ HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +func (x *DotnetSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + +func (x *DotnetSettings) GetRenamedResources() map[string]string { + if x != nil { + return x.RenamedResources + } + return nil +} + +func (x *DotnetSettings) GetIgnoredResources() []string { + if x != nil { + return x.IgnoredResources + } + return nil +} + +func (x *DotnetSettings) GetForcedNamespaceAliases() []string { + if x != nil { + return x.ForcedNamespaceAliases + } + return nil +} + +func (x *DotnetSettings) GetHandwrittenSignatures() []string { + if x != nil { + return x.HandwrittenSignatures + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_settings: + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. 
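+ // When such a field is left unset by the caller, the generated client is
+ // expected to populate it with a newly generated UUID4 value.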
+ // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. 
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. +// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
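+// MethodSettings_LongRunning describes a capped exponential polling schedule.
+// A minimal sketch of that schedule in Go, assuming the documented defaults
+// stand in for every unset field (initial_poll_delay, poll_delay_multiplier,
+// max_poll_delay, total_poll_timeout):
+//
+//    delay := 5 * time.Second                    // initial_poll_delay default
+//    deadline := time.Now().Add(5 * time.Minute) // total_poll_timeout default
+//    for time.Now().Before(deadline) {
+//        time.Sleep(delay)
+//        // Poll the operation here and stop once it reports done.
+//        delay = time.Duration(float64(delay) * 1.5) // poll_delay_multiplier default
+//        if max := 45 * time.Second; delay > max {   // max_poll_delay default
+//            delay = max
+//        }
+//    }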
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 525000001, + Name: "google.api.api_version", + Tag: "bytes,525000001,opt,name=api_version", + Filename: "google/api/client.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // - Adding this annotation to an unannotated method is backwards + // compatible. + // - Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // - Modifying or removing an existing method signature annotation is + // a breaking change. + // - Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. 
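+ // A minimal sketch of reading this option at runtime, assuming the
+ // google.golang.org/protobuf/proto package, this package imported as
+ // annotations, and a protoreflect.ServiceDescriptor named sd:
+ //
+ //   host := proto.GetExtension(sd.Options(), annotations.E_DefaultHost).(string)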
+ // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] + // The API version of this service, which should be sent by version-aware + // clients to the service. This allows services to abide by the schema and + // behavior of the service at the time this API version was deployed. + // The format of the API version must be treated as opaque by clients. + // Services may use a format with an apparent structure, but clients must + // not rely on this to determine components within an API version, or attempt + // to construct other valid API versions. Note that this is for upcoming + // functionality and may not be implemented for all services. + // + // Example: + // + // service Foo { + // option (google.api.api_version) = "v1_20230821_preview"; + // } + // + // optional string api_version = 525000001; + E_ApiVersion = &file_google_api_client_proto_extTypes[3] +) + +var File_google_api_client_proto protoreflect.FileDescriptor + +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, + 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, + 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 
0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, + 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, + 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, + 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 
0x73, 0x52, 0x0c, 0x72, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, + 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, + 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, + 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 
0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, + 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 
0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 
0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 
0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 
0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData +} + +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_api_client_proto_goTypes = []interface{}{ + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 
6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, 
// 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +} + +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil 
+ } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 2, + NumMessages: 17, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 000000000..08505ba3f --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,266 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_behavior.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + FieldBehavior_IMMUTABLE FieldBehavior = 5 + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 +) + +// Enum value maps for FieldBehavior. 
+var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, + } +) + +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p +} + +func (x FieldBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] +} + +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. 
+ // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, + 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, + 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, + 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, + 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, + 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData +} + +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 000000000..a462e7d01 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,392 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters with zeros compressed, following + // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example, + // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. 
This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. 
+func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 
0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 000000000..ffb5838cb --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,774 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/http.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` +} + +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} + +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules + } + return nil +} + +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion + } + return false +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. 
+// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// The following HTTP JSON to RPC mapping is enabled: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` +// +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// 1. 
Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// # Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. 
+// +// The following example selects a gRPC method and applies an `HttpRule` to it: +// +// http: +// rules: +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// # Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +type HttpRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are assignable to Pattern: + // + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). 
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` +} + +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpRule) ProtoMessage() {} + +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. +func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} +} + +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody + } + return "" +} + +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings + } + return nil +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. 
+ Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *CustomHttpPattern) Reset() { + *x = CustomHttpPattern{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomHttpPattern) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomHttpPattern) ProtoMessage() {} + +func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{2} +} + +func (x *CustomHttpPattern) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *CustomHttpPattern) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_google_api_http_proto protoreflect.FileDescriptor + +var file_google_api_http_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, + 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, + 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> 
google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go new file mode 100644 index 000000000..b5db279ae --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -0,0 +1,660 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/resource.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A description of the historical or future-looking state of the +// resource pattern. +type ResourceDescriptor_History int32 + +const ( + // The "unset" value. + ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 +) + +// Enum value maps for ResourceDescriptor_History. +var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) + +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p +} + +func (x ResourceDescriptor_History) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} +} + +// A flag representing a specific style that a resource claims to conform to. +type ResourceDescriptor_Style int32 + +const ( + // The unspecified value. Do not use. + ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0 + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1 +) + +// Enum value maps for ResourceDescriptor_Style. 
+var ( + ResourceDescriptor_Style_name = map[int32]string{ + 0: "STYLE_UNSPECIFIED", + 1: "DECLARATIVE_FRIENDLY", + } + ResourceDescriptor_Style_value = map[string]int32{ + "STYLE_UNSPECIFIED": 0, + "DECLARATIVE_FRIENDLY": 1, + } +) + +func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style { + p := new(ResourceDescriptor_Style) + *p = x + return p +} + +func (x ResourceDescriptor_Style) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[1].Descriptor() +} + +func (ResourceDescriptor_Style) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[1] +} + +func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_Style.Descriptor instead. +func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1} +} + +// A simple descriptor of a resource type. +// +// ResourceDescriptor annotates a resource message (either by means of a +// protobuf annotation or use in the service config), and associates the +// resource's schema, the resource type, and the pattern of the resource name. +// +// Example: +// +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. The relative resource name pattern associated with this resource + // type. 
The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; + // + // Examples: + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // The components in braces correspond to the IDs for each resource in the + // hierarchy. It is expected that, if multiple patterns are provided, + // the same component name (e.g. "project") refers to IDs of the same + // type of resource. + Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` + // Optional. The field on the resource that designates the resource name + // field. If omitted, this is assumed to be "name". + NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` + // Optional. The historical or future-looking state of the resource pattern. + // + // Example: + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 + Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. + Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` + // Style flag(s) for this resource. + // These indicate that a resource is expected to conform to a given + // style. See the specific style flags for additional information. 
+ Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"` +} + +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern + } + return nil +} + +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField + } + return "" +} + +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History + } + return ResourceDescriptor_HISTORY_UNSPECIFIED +} + +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style { + if x != nil { + return x.Style + } + return nil +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. 
+ // + // Example: + // + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` +} + +func (x *ResourceReference) Reset() { + *x = ResourceReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceReference) ProtoMessage() {} + +func (x *ResourceReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. +func (*ResourceReference) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceReference) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ResourceReference) GetChildType() string { + if x != nil { + return x.ChildType + } + return "" +} + +var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*ResourceReference)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: ([]*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // An annotation that describes a resource reference, see + // [ResourceReference][]. + // + // optional google.api.ResourceReference resource_reference = 1055; + E_ResourceReference = &file_google_api_resource_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FileOptions. +var ( + // An annotation that describes a resource definition without a corresponding + // message; see [ResourceDescriptor][]. + // + // repeated google.api.ResourceDescriptor resource_definition = 1053; + E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // An annotation that describes a resource definition, see + // [ResourceDescriptor][]. 
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22, + 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49, + 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c, + 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, + 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c, + 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05, + 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45, + 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x74, 0x79, 0x70, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, + 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData +} + +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = 
[]interface{}{ + (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History + (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style + (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 3: google.api.ResourceReference + (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions + (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style + 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions + 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference + 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 5, // [5:8] is the sub-list for extension type_name + 2, // [2:5] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go new file mode 100644 index 000000000..1d8397b02 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -0,0 +1,693 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/routing.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Specifies the routing information that should be sent along with the request +// in the form of routing header. +// **NOTE:** All service configuration rules follow the "last one wins" order. +// +// The examples below will apply to an RPC which has the following request type: +// +// Message Definition: +// +// message Request { +// // The name of the Table +// // Values can be of the following formats: +// // - `projects//tables/` +// // - `projects//instances//tables/
    ` +// // - `region/<region>/zones/<zone>/tables/<table>
    ` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } +// +// Example message: +// +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } +// +// The routing header consists of one or multiple key-value pairs. Every key +// and value must be percent-encoded, and joined together in the format of +// `key1=value1&key2=value2`. +// In the examples below I am skipping the percent-encoding for readablity. +// +// # Example 1 +// +// Extracting a field from the request to put into the routing header +// unchanged, with the key equal to the field name. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; +// +// result: +// +// x-goog-request-params: app_profile_id=profiles/prof_qux +// +// # Example 2 +// +// Extracting a field from the request to put into the routing header +// unchanged, with the key different from the field name. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=profiles/prof_qux +// +// # Example 3 +// +// Extracting a field from the request to put into the routing +// header, while matching a path template syntax on the field's value. +// +// NB: it is more useful to send nothing than to send garbage for the purpose +// of dynamic routing, since garbage pollutes cache. Thus the matching. +// +// # Sub-example 3a +// +// The field matches the template. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// +// # Sub-example 3b +// +// The field does not match the template. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; +// +// result: +// +// +// +// # Sub-example 3c +// +// Multiple alternative conflictingly named path templates are +// specified. The one that matches is used to construct the header. +// +// annotation: +// +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// +// # Example 4 +// +// Extracting a single routing header key-value pair by matching a +// template syntax on (a part of) a single request field. 
+// +// annotation: +// +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=projects/proj_foo +// +// # Example 5 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on (parts of) a single request +// field. The last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar +// +// # Example 6 +// +// Extracting multiple routing header key-value pairs by matching +// several non-conflicting path templates on (parts of) a single request field. +// +// # Sub-example 6a +// +// Make the templates strict, so that if the `table_name` does not +// have an instance information, nothing is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Sub-example 6b +// +// Make the templates loose, so that if the `table_name` does not +// have an instance information, just the project id part is sent. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; +// +// result (is the same as 6a for our example message because it has the instance +// information): +// +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar +// +// # Example 7 +// +// Extracting multiple routing header key-value pairs by matching +// several path templates on multiple request fields. +// +// NB: note that here there is no way to specify sending nothing if one of the +// fields does not match its template. E.g. if the `table_name` is in the wrong +// format, the `project_id` will not be sent, but the `routing_id` will be. +// The backend routing code has to be aware of that and be prepared to not +// receive a full complement of keys if it expects multiple. +// +// annotation: +// +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// +// # Example 8 +// +// Extracting a single routing header key-value pair by matching +// several conflictingly named path templates on several request fields. The +// last template to match "wins" the conflict. +// +// annotation: +// +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; +// +// result: +// +// x-goog-request-params: routing_id=profiles/prof_qux +// +// # Example 9 +// +// Bringing it all together. +// +// annotation: +// +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; +// +// result: +// +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux +type RoutingRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of Routing Parameter specifications. + // **NOTE:** If multiple Routing Parameters describe the same key + // (via the `path_template` field or via the `field` field when + // `path_template` is not provided), "last one wins" rule + // determines which Parameter gets used. + // See the examples for more details. 
+ RoutingParameters []*RoutingParameter `protobuf:"bytes,2,rep,name=routing_parameters,json=routingParameters,proto3" json:"routing_parameters,omitempty"` +} + +func (x *RoutingRule) Reset() { + *x = RoutingRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingRule) ProtoMessage() {} + +func (x *RoutingRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingRule.ProtoReflect.Descriptor instead. +func (*RoutingRule) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingRule) GetRoutingParameters() []*RoutingParameter { + if x != nil { + return x.RoutingParameters + } + return nil +} + +// A projection from an input message to the GRPC or REST header. +type RoutingParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A request field to extract the header key-value pair from. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A pattern matching the key-value field. Optional. + // If not specified, the whole field specified in the `field` field will be + // taken as value, and its name used as key. If specified, it MUST contain + // exactly one named segment (along with any number of unnamed segments) The + // pattern will be matched over the field specified in the `field` field, then + // if the match is successful: + // - the name of the single named segment will be used as a header name, + // - the match value of the segment will be used as a header value; + // if the match is NOT successful, nothing will be sent. + // + // Example: + // + // -- This is a field in the request message + // | that the header value will be extracted from. + // | + // | -- This is the key name in the + // | | routing header. + // V | + // field: "table_name" v + // path_template: "projects/*/{table_location=instances/*}/tables/*" + // ^ ^ + // | | + // In the {} brackets is the pattern that -- | + // specifies what to extract from the | + // field as a value to be sent. | + // | + // The string in the field must match the whole pattern -- + // before brackets, inside brackets, after brackets. + // + // When looking at this specific example, we can see that: + // - A key-value pair with the key `table_location` + // and the value matching `instances/*` should be added + // to the x-goog-request-params routing header. + // - The value is extracted from the request message's `table_name` field + // if it matches the full pattern specified: + // `projects/*/instances/*/tables/*`. + // + // **NB:** If the `path_template` field is not provided, the key name is + // equal to the field name, and the whole field should be sent as a value. 
+ // This makes the pattern for the field and the value functionally equivalent + // to `**`, and the configuration + // + // { + // field: "table_name" + // } + // + // is a functionally equivalent shorthand to: + // + // { + // field: "table_name" + // path_template: "{table_name=**}" + // } + // + // See Example 1 for more details. + PathTemplate string `protobuf:"bytes,2,opt,name=path_template,json=pathTemplate,proto3" json:"path_template,omitempty"` +} + +func (x *RoutingParameter) Reset() { + *x = RoutingParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_routing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RoutingParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingParameter) ProtoMessage() {} + +func (x *RoutingParameter) ProtoReflect() protoreflect.Message { + mi := &file_google_api_routing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingParameter.ProtoReflect.Descriptor instead. +func (*RoutingParameter) Descriptor() ([]byte, []int) { + return file_google_api_routing_proto_rawDescGZIP(), []int{1} +} + +func (x *RoutingParameter) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *RoutingParameter) GetPathTemplate() string { + if x != nil { + return x.PathTemplate + } + return "" +} + +var file_google_api_routing_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*RoutingRule)(nil), + Field: 72295729, + Name: "google.api.routing", + Tag: "bytes,72295729,opt,name=routing", + Filename: "google/api/routing.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // See RoutingRule. 
+ // + // optional google.api.RoutingRule routing = 72295729; + E_Routing = &file_google_api_routing_proto_extTypes[0] +) + +var File_google_api_routing_proto protoreflect.FileDescriptor + +var file_google_api_routing_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x22, 0x4d, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x3a, 0x54, 0x0a, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb1, + 0xca, 0xbc, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_routing_proto_rawDescOnce sync.Once + file_google_api_routing_proto_rawDescData = file_google_api_routing_proto_rawDesc +) + +func file_google_api_routing_proto_rawDescGZIP() []byte { + file_google_api_routing_proto_rawDescOnce.Do(func() { + file_google_api_routing_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_routing_proto_rawDescData) + }) + return file_google_api_routing_proto_rawDescData +} + +var file_google_api_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_routing_proto_goTypes = []interface{}{ + 
(*RoutingRule)(nil), // 0: google.api.RoutingRule + (*RoutingParameter)(nil), // 1: google.api.RoutingParameter + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_google_api_routing_proto_depIdxs = []int32{ + 1, // 0: google.api.RoutingRule.routing_parameters:type_name -> google.api.RoutingParameter + 2, // 1: google.api.routing:extendee -> google.protobuf.MethodOptions + 0, // 2: google.api.routing:type_name -> google.api.RoutingRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_routing_proto_init() } +func file_google_api_routing_proto_init() { + if File_google_api_routing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_routing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_routing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RoutingParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_routing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_routing_proto_goTypes, + DependencyIndexes: file_google_api_routing_proto_depIdxs, + MessageInfos: file_google_api_routing_proto_msgTypes, + ExtensionInfos: file_google_api_routing_proto_extTypes, + }.Build() + File_google_api_routing_proto = out.File + file_google_api_routing_proto_rawDesc = nil + file_google_api_routing_proto_goTypes = nil + file_google_api_routing_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 000000000..498020e33 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,203 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/launch_stage.proto + +package api + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // The feature is not yet implemented. Users can not use it. + LaunchStage_UNIMPLEMENTED LaunchStage = 6 + // Prelaunch features are hidden from users and are only visible internally. + LaunchStage_PRELAUNCH LaunchStage = 7 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +// Enum value maps for LaunchStage. 
+var ( + LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 6: "UNIMPLEMENTED", + 7: "PRELAUNCH", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", + } + LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "UNIMPLEMENTED": 6, + "PRELAUNCH": 7, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, + } +) + +func (x LaunchStage) Enum() *LaunchStage { + p := new(LaunchStage) + *p = x + return p +} + +func (x LaunchStage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LaunchStage) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_launch_stage_proto_enumTypes[0].Descriptor() +} + +func (LaunchStage) Type() protoreflect.EnumType { + return &file_google_api_launch_stage_proto_enumTypes[0] +} + +func (x LaunchStage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LaunchStage.Descriptor instead. +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return file_google_api_launch_stage_proto_rawDescGZIP(), []int{0} +} + +var File_google_api_launch_stage_proto protoreflect.FileDescriptor + +var file_google_api_launch_stage_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2a, 0x8c, 0x01, 0x0a, 0x0b, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x4c, + 0x41, 0x55, 0x4e, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, + 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x50, 0x52, 0x45, 0x4c, 0x41, 0x55, 0x4e, 0x43, 0x48, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x45, + 0x41, 0x52, 0x4c, 0x59, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x4c, 0x50, 0x48, 0x41, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x45, 0x54, 0x41, + 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x41, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, + 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x05, 0x42, 0x5a, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x10, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, 0x69, 0xa2, + 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_launch_stage_proto_rawDescOnce sync.Once + file_google_api_launch_stage_proto_rawDescData = file_google_api_launch_stage_proto_rawDesc +) + +func file_google_api_launch_stage_proto_rawDescGZIP() []byte { + file_google_api_launch_stage_proto_rawDescOnce.Do(func() { + file_google_api_launch_stage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_launch_stage_proto_rawDescData) + }) + return file_google_api_launch_stage_proto_rawDescData +} + +var 
file_google_api_launch_stage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_launch_stage_proto_goTypes = []interface{}{ + (LaunchStage)(0), // 0: google.api.LaunchStage +} +var file_google_api_launch_stage_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_api_launch_stage_proto_init() } +func file_google_api_launch_stage_proto_init() { + if File_google_api_launch_stage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_launch_stage_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_launch_stage_proto_goTypes, + DependencyIndexes: file_google_api_launch_stage_proto_depIdxs, + EnumInfos: file_google_api_launch_stage_proto_enumTypes, + }.Build() + File_google_api_launch_stage_proto = out.File + file_google_api_launch_stage_proto_rawDesc = nil + file_google_api_launch_stage_proto_goTypes = nil + file_google_api_launch_stage_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go new file mode 100644 index 000000000..db70a101b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/location/locations.pb.go @@ -0,0 +1,631 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/cloud/location/locations.proto + +package location + +import ( + context "context" + reflect "reflect" + sync "sync" + + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The request message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. +type ListLocationsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource that owns the locations collection, if applicable. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The standard list filter. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The standard list page token. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListLocationsRequest) Reset() { + *x = ListLocationsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsRequest) ProtoMessage() {} + +func (x *ListLocationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsRequest.ProtoReflect.Descriptor instead. +func (*ListLocationsRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{0} +} + +func (x *ListLocationsRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ListLocationsRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *ListLocationsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListLocationsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// The response message for [Locations.ListLocations][google.cloud.location.Locations.ListLocations]. +type ListLocationsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of locations that matches the specified filter in the request. + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + // The standard List next-page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListLocationsResponse) Reset() { + *x = ListLocationsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListLocationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListLocationsResponse) ProtoMessage() {} + +func (x *ListLocationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListLocationsResponse.ProtoReflect.Descriptor instead. 
+func (*ListLocationsResponse) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{1} +} + +func (x *ListLocationsResponse) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +func (x *ListLocationsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// The request message for [Locations.GetLocation][google.cloud.location.Locations.GetLocation]. +type GetLocationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetLocationRequest) Reset() { + *x = GetLocationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetLocationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLocationRequest) ProtoMessage() {} + +func (x *GetLocationRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLocationRequest.ProtoReflect.Descriptor instead. +func (*GetLocationRequest) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{2} +} + +func (x *GetLocationRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// A resource that represents Google Cloud Platform location. +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Resource name for the location, which may vary between implementations. + // For example: `"projects/example-project/locations/us-east1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The canonical id for this location. For example: `"us-east1"`. + LocationId string `protobuf:"bytes,4,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"` + // The friendly name for this location, typically a nearby city name. + // For example, "Tokyo". + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Cross-service attributes for the location. For example + // + // {"cloud.googleapis.com/region": "us-east1"} + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Service-specific metadata. For example the available capacity at the given + // location. 
+ Metadata *anypb.Any `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_google_cloud_location_locations_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. +func (*Location) Descriptor() ([]byte, []int) { + return file_google_cloud_location_locations_proto_rawDescGZIP(), []int{3} +} + +func (x *Location) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Location) GetLocationId() string { + if x != nil { + return x.LocationId + } + return "" +} + +func (x *Location) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Location) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Location) GetMetadata() *anypb.Any { + if x != nil { + return x.Metadata + } + return nil +} + +var File_google_cloud_location_locations_proto protoreflect.FileDescriptor + +var file_google_cloud_location_locations_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x7e, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x7e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x94, 0x02, 0x0a, 0x08, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x32, 0xa4, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0xab, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x39, 0x12, 0x14, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9e, 0x01, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x3d, 0x12, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x5a, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0x48, + 0xca, 0x41, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x6f, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_cloud_location_locations_proto_rawDescOnce sync.Once + file_google_cloud_location_locations_proto_rawDescData = file_google_cloud_location_locations_proto_rawDesc +) + +func file_google_cloud_location_locations_proto_rawDescGZIP() []byte { + file_google_cloud_location_locations_proto_rawDescOnce.Do(func() { + file_google_cloud_location_locations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_location_locations_proto_rawDescData) + }) + return file_google_cloud_location_locations_proto_rawDescData +} + +var file_google_cloud_location_locations_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_cloud_location_locations_proto_goTypes = []interface{}{ + (*ListLocationsRequest)(nil), // 0: google.cloud.location.ListLocationsRequest + (*ListLocationsResponse)(nil), // 1: google.cloud.location.ListLocationsResponse + (*GetLocationRequest)(nil), // 2: google.cloud.location.GetLocationRequest + (*Location)(nil), // 3: google.cloud.location.Location + nil, // 4: google.cloud.location.Location.LabelsEntry + (*anypb.Any)(nil), // 5: google.protobuf.Any +} +var file_google_cloud_location_locations_proto_depIdxs = []int32{ + 3, // 0: google.cloud.location.ListLocationsResponse.locations:type_name -> 
google.cloud.location.Location + 4, // 1: google.cloud.location.Location.labels:type_name -> google.cloud.location.Location.LabelsEntry + 5, // 2: google.cloud.location.Location.metadata:type_name -> google.protobuf.Any + 0, // 3: google.cloud.location.Locations.ListLocations:input_type -> google.cloud.location.ListLocationsRequest + 2, // 4: google.cloud.location.Locations.GetLocation:input_type -> google.cloud.location.GetLocationRequest + 1, // 5: google.cloud.location.Locations.ListLocations:output_type -> google.cloud.location.ListLocationsResponse + 3, // 6: google.cloud.location.Locations.GetLocation:output_type -> google.cloud.location.Location + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_cloud_location_locations_proto_init() } +func file_google_cloud_location_locations_proto_init() { + if File_google_cloud_location_locations_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_cloud_location_locations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListLocationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLocationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_location_locations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_cloud_location_locations_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_cloud_location_locations_proto_goTypes, + DependencyIndexes: file_google_cloud_location_locations_proto_depIdxs, + MessageInfos: file_google_cloud_location_locations_proto_msgTypes, + }.Build() + File_google_cloud_location_locations_proto = out.File + file_google_cloud_location_locations_proto_rawDesc = nil + file_google_cloud_location_locations_proto_goTypes = nil + file_google_cloud_location_locations_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// LocationsClient is the client API for Locations service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type LocationsClient interface { + // Lists information about the supported locations for this service. + ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) +} + +type locationsClient struct { + cc grpc.ClientConnInterface +} + +func NewLocationsClient(cc grpc.ClientConnInterface) LocationsClient { + return &locationsClient{cc} +} + +func (c *locationsClient) ListLocations(ctx context.Context, in *ListLocationsRequest, opts ...grpc.CallOption) (*ListLocationsResponse, error) { + out := new(ListLocationsResponse) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/ListLocations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *locationsClient) GetLocation(ctx context.Context, in *GetLocationRequest, opts ...grpc.CallOption) (*Location, error) { + out := new(Location) + err := c.cc.Invoke(ctx, "/google.cloud.location.Locations/GetLocation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LocationsServer is the server API for Locations service. +type LocationsServer interface { + // Lists information about the supported locations for this service. + ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) + // Gets information about a location. + GetLocation(context.Context, *GetLocationRequest) (*Location, error) +} + +// UnimplementedLocationsServer can be embedded to have forward compatible implementations. +type UnimplementedLocationsServer struct { +} + +func (*UnimplementedLocationsServer) ListLocations(context.Context, *ListLocationsRequest) (*ListLocationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListLocations not implemented") +} +func (*UnimplementedLocationsServer) GetLocation(context.Context, *GetLocationRequest) (*Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLocation not implemented") +} + +func RegisterLocationsServer(s *grpc.Server, srv LocationsServer) { + s.RegisterService(&_Locations_serviceDesc, srv) +} + +func _Locations_ListLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListLocationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).ListLocations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/ListLocations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).ListLocations(ctx, req.(*ListLocationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Locations_GetLocation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLocationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LocationsServer).GetLocation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.location.Locations/GetLocation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LocationsServer).GetLocation(ctx, req.(*GetLocationRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +var _Locations_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.location.Locations", + HandlerType: (*LocationsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListLocations", + Handler: _Locations_ListLocations_Handler, + }, + { + MethodName: "GetLocation", + Handler: _Locations_GetLocation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/location/locations.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go new file mode 100644 index 000000000..bd46edbe7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -0,0 +1,336 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/rpc/code.proto + +package code + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The canonical error codes for gRPC APIs. +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +type Code int32 + +const ( + // Not an error; returned on success. + // + // HTTP Mapping: 200 OK + Code_OK Code = 0 + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + Code_CANCELLED Code = 1 + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. 
Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + Code_UNKNOWN Code = 2 + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + Code_INVALID_ARGUMENT Code = 3 + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + Code_DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented allowlist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + Code_NOT_FOUND Code = 5 + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + Code_ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + Code_PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + Code_UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + Code_RESOURCE_EXHAUSTED Code = 8 + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. 
+ // + // HTTP Mapping: 400 Bad Request + Code_FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + Code_ABORTED Code = 10 + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + Code_OUT_OF_RANGE Code = 11 + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + Code_UNIMPLEMENTED Code = 12 + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + Code_INTERNAL Code = 13 + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + Code_UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. + // + // HTTP Mapping: 500 Internal Server Error + Code_DATA_LOSS Code = 15 +) + +// Enum value maps for Code. 
+var ( + Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + } + Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + } +) + +func (x Code) Enum() *Code { + p := new(Code) + *p = x + return p +} + +func (x Code) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Code) Descriptor() protoreflect.EnumDescriptor { + return file_google_rpc_code_proto_enumTypes[0].Descriptor() +} + +func (Code) Type() protoreflect.EnumType { + return &file_google_rpc_code_proto_enumTypes[0] +} + +func (x Code) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Code.Descriptor instead. +func (Code) EnumDescriptor() ([]byte, []int) { + return file_google_rpc_code_proto_rawDescGZIP(), []int{0} +} + +var File_google_rpc_code_proto protoreflect.FileDescriptor + +var file_google_rpc_code_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, + 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, + 0x0b, 
0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, + 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, + 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, + 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_code_proto_rawDescOnce sync.Once + file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc +) + +func file_google_rpc_code_proto_rawDescGZIP() []byte { + file_google_rpc_code_proto_rawDescOnce.Do(func() { + file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) + }) + return file_google_rpc_code_proto_rawDescData +} + +var file_google_rpc_code_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_rpc_code_proto_goTypes = []interface{}{ + (Code)(0), // 0: google.rpc.Code +} +var file_google_rpc_code_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_rpc_code_proto_init() } +func file_google_rpc_code_proto_init() { + if File_google_rpc_code_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_code_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_code_proto_goTypes, + DependencyIndexes: file_google_rpc_code_proto_depIdxs, + EnumInfos: file_google_rpc_code_proto_enumTypes, + }.Build() + File_google_rpc_code_proto = out.File + file_google_rpc_code_proto_rawDesc = nil + file_google_rpc_code_proto_goTypes = nil + file_google_rpc_code_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 000000000..3e5621827 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,1314 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/rpc/error_details.proto + +package errdetails + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. 
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` +} + +func (x *RetryInfo) Reset() { + *x = RetryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryInfo) ProtoMessage() {} + +func (x *RetryInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. +func (*RetryInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} +} + +func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { + if x != nil { + return x.RetryDelay + } + return nil +} + +// Describes additional debugging info. 
+type DebugInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` +} + +func (x *DebugInfo) Reset() { + *x = DebugInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugInfo) ProtoMessage() {} + +func (x *DebugInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. +func (*DebugInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} +} + +func (x *DebugInfo) GetStackEntries() []string { + if x != nil { + return x.StackEntries + } + return nil +} + +func (x *DebugInfo) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. +type QuotaFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *QuotaFailure) Reset() { + *x = QuotaFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure) ProtoMessage() {} + +func (x *QuotaFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} +} + +func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes what preconditions have failed. 
+// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *PreconditionFailure) Reset() { + *x = PreconditionFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure) ProtoMessage() {} + +func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} +} + +func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all violations in a client request. + FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` +} + +func (x *BadRequest) Reset() { + *x = BadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest) ProtoMessage() {} + +func (x *BadRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. +func (*BadRequest) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} +} + +func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if x != nil { + return x.FieldViolations + } + return nil +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. 
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` +} + +func (x *RequestInfo) Reset() { + *x = RequestInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestInfo) ProtoMessage() {} + +func (x *RequestInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. +func (*RequestInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *RequestInfo) GetServingData() string { + if x != nil { + return x.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. 
+type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path that leads to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // - `full_name` for a violation in the `full_name` value + // - `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // - `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. 
+ // + // In JSON, the same values are represented as: + // + // - `fullName` for a violation in the `fullName` value + // - `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // - `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. +type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. 
+func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, + 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, + 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo + (*RetryInfo)(nil), // 1: google.rpc.RetryInfo + (*DebugInfo)(nil), // 2: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + nil, // 10: google.rpc.ErrorInfo.MetadataEntry + (*QuotaFailure_Violation)(nil), // 11: 
google.rpc.QuotaFailure.Violation + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 0b9907f89..6ad1b1c1d 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,81 +1,53 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 // source: google/rpc/status.proto package status import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. // -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. 
If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The status code, which should be an enum value of // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` @@ -86,78 +58,146 @@ type Status struct { Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. - Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_24d244abaf643bfe, []int{0} +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Status proto.InternalMessageInfo +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } return 0 } -func (m *Status) GetMessage() string { - if m != nil { - return m.Message +func (x *Status) GetMessage() string { + if x != nil { + return x.Message } return "" } -func (m *Status) GetDetails() []*any.Any { - if m != nil { - return m.Details +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details } return nil } -func init() { - proto.RegisterType((*Status)(nil), "google.rpc.Status") +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } - -var fileDescriptor_24d244abaf643bfe = []byte{ - // 209 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, - 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, - 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, - 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, - 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, - 
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, - 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, - 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, - 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, - 0x00, +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go new file mode 100644 index 000000000..c7bef08aa --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go @@ -0,0 +1,200 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/date.proto + +package date + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a whole or partial calendar date, such as a birthday. The time of +// day and time zone are either specified elsewhere or are insignificant. The +// date is relative to the Gregorian Calendar. This can represent one of the +// following: +// +// * A full date, with non-zero year, month, and day values +// * A month and day value, with a zero year, such as an anniversary +// * A year on its own, with zero month and day values +// * A year and month value, with a zero day, such as a credit card expiration +// date +// +// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and +// `google.protobuf.Timestamp`. +type Date struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Year of the date. Must be from 1 to 9999, or 0 to specify a date without + // a year. + Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` + // Month of a year. Must be from 1 to 12, or 0 to specify a year without a + // month and day. + Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` + // Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + // to specify a year by itself or a year and month where the day isn't + // significant. + Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` +} + +func (x *Date) Reset() { + *x = Date{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_date_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Date) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Date) ProtoMessage() {} + +func (x *Date) ProtoReflect() protoreflect.Message { + mi := &file_google_type_date_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Date.ProtoReflect.Descriptor instead. 
+func (*Date) Descriptor() ([]byte, []int) { + return file_google_type_date_proto_rawDescGZIP(), []int{0} +} + +func (x *Date) GetYear() int32 { + if x != nil { + return x.Year + } + return 0 +} + +func (x *Date) GetMonth() int32 { + if x != nil { + return x.Month + } + return 0 +} + +func (x *Date) GetDay() int32 { + if x != nil { + return x.Day + } + return 0 +} + +var File_google_type_date_proto protoreflect.FileDescriptor + +var file_google_type_date_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, + 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8, + 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_date_proto_rawDescOnce sync.Once + file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc +) + +func file_google_type_date_proto_rawDescGZIP() []byte { + file_google_type_date_proto_rawDescOnce.Do(func() { + file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData) + }) + return file_google_type_date_proto_rawDescData +} + +var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_date_proto_goTypes = []interface{}{ + (*Date)(nil), // 0: google.type.Date +} +var file_google_type_date_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_date_proto_init() } +func file_google_type_date_proto_init() { + if File_google_type_date_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Date); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_date_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_date_proto_goTypes, + DependencyIndexes: file_google_type_date_proto_depIdxs, + MessageInfos: file_google_type_date_proto_msgTypes, + }.Build() + 
File_google_type_date_proto = out.File + file_google_type_date_proto_rawDesc = nil + file_google_type_date_proto_goTypes = nil + file_google_type_date_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go new file mode 100644 index 000000000..7d57f34b4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -0,0 +1,232 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/type/expr.proto + +package expr + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a textual expression in the Common Expression Language (CEL) +// syntax. CEL is a C-like expression language. The syntax and semantics of CEL +// are documented at https://github.com/google/cel-spec. +// +// Example (Comparison): +// +// title: "Summary size limit" +// description: "Determines if a summary is less than 100 chars" +// expression: "document.summary.size() < 100" +// +// Example (Equality): +// +// title: "Requestor is owner" +// description: "Determines if requestor is the document owner" +// expression: "document.owner == request.auth.claims.email" +// +// Example (Logic): +// +// title: "Public documents" +// description: "Determine whether the document should be publicly visible" +// expression: "document.type != 'private' && document.type != 'internal'" +// +// Example (Data Manipulation): +// +// title: "Notification string" +// description: "Create a notification string with a timestamp." +// expression: "'New message received at ' + string(document.create_time)" +// +// The exact variables and functions that may be referenced within an expression +// are determined by the service that evaluates it. See the service +// documentation for additional information. +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Textual representation of an expression in Common Expression Language + // syntax. + Expression string `protobuf:"bytes,1,opt,name=expression,proto3" json:"expression,omitempty"` + // Optional. Title for the expression, i.e. a short string describing + // its purpose. This can be used e.g. in UIs which allow to enter the + // expression. + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + // Optional. Description of the expression. This is a longer text which + // describes the expression, e.g. 
when hovered over it in a UI. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Optional. String indicating the location of the expression for error + // reporting, e.g. a file name and a position in the file. + Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_expr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_google_type_expr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. +func (*Expr) Descriptor() ([]byte, []int) { + return file_google_type_expr_proto_rawDescGZIP(), []int{0} +} + +func (x *Expr) GetExpression() string { + if x != nil { + return x.Expression + } + return "" +} + +func (x *Expr) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Expr) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Expr) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +var File_google_type_expr_proto protoreflect.FileDescriptor + +var file_google_type_expr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1e, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x5a, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x45, 0x78, 0x70, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_type_expr_proto_rawDescOnce sync.Once + file_google_type_expr_proto_rawDescData = file_google_type_expr_proto_rawDesc +) + +func file_google_type_expr_proto_rawDescGZIP() []byte { + file_google_type_expr_proto_rawDescOnce.Do(func() { + 
file_google_type_expr_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_expr_proto_rawDescData) + }) + return file_google_type_expr_proto_rawDescData +} + +var file_google_type_expr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_expr_proto_goTypes = []interface{}{ + (*Expr)(nil), // 0: google.type.Expr +} +var file_google_type_expr_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_expr_proto_init() } +func file_google_type_expr_proto_init() { + if File_google_type_expr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_expr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_expr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_expr_proto_goTypes, + DependencyIndexes: file_google_type_expr_proto_depIdxs, + MessageInfos: file_google_type_expr_proto_msgTypes, + }.Build() + File_google_type_expr_proto = out.File + file_google_type_expr_proto_rawDesc = nil + file_google_type_expr_proto_goTypes = nil + file_google_type_expr_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index f0f723f35..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -language: go - -matrix: - include: - - go: 1.12.x - env: VET=1 GO111MODULE=on - - go: 1.12.x - env: RACE=1 GO111MODULE=on - - go: 1.12.x - env: RUN386=1 - - go: 1.12.x - env: GRPC_GO_RETRY=on - - go: 1.11.x - env: GO111MODULE=on - - go: 1.10.x - - go: 1.9.x - - go: 1.9.x - env: GAE=1 - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi - - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi - - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 4f1567e2f..0854d298e 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. 
+- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use @@ -53,10 +66,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine + - `./scripts/vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3a..5d4096d46 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,20 +8,29 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. 
## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC -- [cesarghali](https://github.com/cesarghali), Google LLC + +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC +- [atollena](https://github.com/atollena), Datadog, Inc. - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabd..be38384ff 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,32 +16,24 @@ proto: fi go generate google.golang.org/grpc/... -test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... 
vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ @@ -50,11 +42,8 @@ vetdeps: deps \ proto \ test \ - testappengine \ - testappenginedeps \ - testdeps \ + testsubmodule \ testrace \ - updatedeps \ - updatetestdeps \ + testdeps \ vet \ vetdeps diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/google.golang.org/grpc/NOTICE.txt similarity index 93% rename from vendor/github.com/jmespath/go-jmespath/LICENSE rename to vendor/google.golang.org/grpc/NOTICE.txt index b03310a91..530197749 100644 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -1,4 +1,4 @@ -Copyright 2015 James Saryerwinnie +Copyright 2014 gRPC authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db5..b572707c6 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,64 +1,46 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open -source, general RPC framework that puts mobile and HTTP/2 first. For more -information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. -Installation ------------- +## Prerequisites -To install this package, you need to install Go and setup your Go workspace on -your computer. The simplest way to install the library is to run: +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. -``` -$ go get -u google.golang.org/grpc -``` +## Installation -With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in -your source code and `go [build|run|test]` will automatically download the -necessary dependencies ([Go modules -ref](https://github.com/golang/go/wiki/Modules)). +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: -If you are trying to access grpc-go from within China, please see the -[FAQ](#FAQ) below. -Prerequisites -------------- -gRPC-Go requires Go 1.9 or later. +```go +import "google.golang.org/grpc" +``` -Documentation -------------- -- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API - descriptions. -- Documentation on specific topics can be found in the [Documentation - directory](Documentation/). -- Examples can be found in the [examples directory](examples/). +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. 
-Performance ------------ -Performance benchmark data for grpc-go and other languages is maintained in -[this -dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +## Learn more -Status ------- -General Availability [Google Cloud Platform Launch -Stages](https://cloud.google.com/terms/launch-stages). +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) -FAQ ---- +## FAQ -#### I/O Timeout Errors +### I/O Timeout Errors -The `golang.org` domain may be blocked from some countries. `go get` usually +The `golang.org` domain may be blocked from some countries. `go get` usually produces an error like the following when this happens: -``` +```console $ go get -u google.golang.org/grpc package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) ``` @@ -67,19 +49,10 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ``` - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: - ``` + ```sh go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod tidy go mod vendor @@ -87,35 +60,48 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. Please refer to [this - issue](https://github.com/golang/go/issues/28652) in the golang repo regarding - this concern. + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). -#### Compiling error, undefined: grpc.SupportPackageIsVersion +### Compiling error, undefined: grpc.SupportPackageIsVersion -Please update proto package, gRPC package and rebuild the proto files: - - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - - `go get -u google.golang.org/grpc` - - `protoc --go_out=plugins=grpc:. *.proto` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. -#### How to turn on logging +### How to turn on logging -The default logger is controlled by the environment variables. Turn everything -on by setting: +The default logger is controlled by environment variables. Turn everything on +like this: -``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info ``` -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` +### The RPC failed with error `"code = Unavailable desc = transport is closing"` This error means the connection the RPC is using was closed, and there are many possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown + 1. 
Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..abab27937 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..52d530d7a --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import ( + "fmt" + "strings" +) + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. +type Attributes struct { + m map[any]any +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. 
To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value any) *Attributes { + if a == nil { + return New(key, value) + } + n := &Attributes{m: make(map[any]any, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } + n.m[key] = value + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. The returned value should not be modified. +func (a *Attributes) Value(key any) any { + if a == nil { + return nil + } + return a.m[key] +} + +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 97c6e2568..29475e31c 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -23,16 +23,39 @@ package grpc import ( "time" + + "google.golang.org/grpc/backoff" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. var DefaultBackoffConfig = BackoffConfig{ MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration } + +// ConnectParams defines the parameters for connecting and retrying. 
Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..d7b40b7cb --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specified +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a8eb0f476..000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. 
- // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. 
In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. 
The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). -type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c266f4ec1..b181f386a 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,8 +27,11 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -38,6 +41,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -50,7 +55,14 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - m[strings.ToLower(b.Name())] = b + name := strings.ToLower(b.Name()) + if name != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } + m[name] = b } // unregisterForTesting deletes the balancer with the given name from the @@ -61,38 +73,59 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. 
// -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -101,9 +134,24 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -111,10 +159,27 @@ type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. 
+ StateListener func(SubConnState) +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker } // ClientConn represents a gRPC ClientConn. @@ -127,20 +192,35 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) - // UpdateBalancerState is called by balancer to notify gRPC that some internal - // state in balancer has changed. + // UpdateState notifies gRPC that the balancer's internal state has + // changed. // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - UpdateBalancerState(s connectivity.State, p Picker) + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. - ResolveNow(resolver.ResolveNowOption) + ResolveNow(resolver.ResolveNowOptions) // Target returns the dial target for this ClientConn. // @@ -150,22 +230,37 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. 
Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -185,11 +280,14 @@ type ConfigParser interface { ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } -// PickOptions contains addition information for the Pick operation. -type PickOptions struct { +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context } // DoneInfo contains additional information for done. @@ -205,56 +303,79 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. - ServerLoad interface{} + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any } var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + // gRPC will block the RPC until a new picker is available via UpdateState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. ErrTransientFailure = errors.New("all SubConns are in TransientFailure") ) +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. 
+ // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + // Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). + // If an error is returned: // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with unavailable error. + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. 
// - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. - Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) } // Balancer takes input from gRPC, manages SubConns, and collects and aggregates @@ -262,38 +383,54 @@ type Picker interface { // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. - // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateClientConnState will be called instead. - HandleResolvedAddrs([]resolver.Address, error) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. 
+// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. ConnectivityState connectivity.State - // TODO: add last connection error + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the @@ -305,60 +442,23 @@ type ClientConnState struct { BalancerConfig serviceconfig.LoadBalancingConfig } -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateClientConnState method will be called -// instead of HandleResolvedAddrs and its UpdateSubConnState will be called -// instead of HandleSubConnStateChange. -type V2Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. - UpdateClientConnState(ClientConnState) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. -// -// Idle and Shutdown are not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. 
- for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 1af88f0a3..2b87bd79c 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,7 +19,8 @@ package base import ( - "context" + "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -27,26 +28,30 @@ import ( "google.golang.org/grpc/resolver" ) +var logger = grpclog.Component("balancer") + type baseBuilder struct { name string pickerBuilder PickerBuilder config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &baseBalancer{ +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, - // Initialize picker to a picker that always return - // ErrNoSubConnAvailable, because when state of a SubConn changes, we - // may call UpdateBalancerState with this picker. - picker: NewErrPicker(balancer.ErrNoSubConnAvailable), - config: bb.config, + config: bb.config, + state: connectivity.Connecting, } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal } func (bb *baseBuilder) Name() string { @@ -60,82 +65,147 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
+ return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) } -func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { - // TODO: handle s.ResolverState.Err (log if not nil) once implemented. +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // TODO: handle s.ResolverState.ServiceConfig? - if grpclog.V(2) { - grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[a] = sc + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } } - for a, sc := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(sc) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + sc.Shutdown() + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in updateSubConnState. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) } // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. 
The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { - b.picker = NewErrPicker(balancer.ErrTransientFailure) + b.picker = NewErrPicker(b.mergeErrors()) return } - readySCs := make(map[resolver.Address]balancer.SubConn) + readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc + readySCs[sc] = SubConnInfo{Address: addr} } } - b.picker = b.pickerBuilder.Build(readySCs) + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } -func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { - if grpclog.V(2) { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() } return } @@ -144,41 +214,51 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. 
+ b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.state == connectivity.TransientFailure { b.regeneratePicker() } - - b.cc.UpdateBalancerState(b.state, b.picker) + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } -// NewErrPicker returns a picker that always returns err on Pick(). +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 34b1f2994..e31d76e33 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -37,15 +37,22 @@ import ( // PickerBuilder creates balancer.Picker. type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker } -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn } // Config contains the config info about the base balancer builder. 
@@ -54,8 +61,8 @@ type Config struct { HealthCheck bool } -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000..c33413581 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 000000000..52f54e6a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,957 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LoadBalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LoadBalanceRequestType: + // + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` +} + +func (x *LoadBalanceRequest) Reset() { + *x = LoadBalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalanceRequest) ProtoMessage() {} + +func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalanceRequest.ProtoReflect.Descriptor instead. 
+func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0} +} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (x *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + // This message should be sent on the first request to the load balancer. + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + // The client stats should be periodically reported to the load balancer + // based on the duration defined in the InitialLoadBalanceResponse. + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +type InitialLoadBalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *InitialLoadBalanceRequest) Reset() { + *x = InitialLoadBalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialLoadBalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialLoadBalanceRequest) ProtoMessage() {} + +func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialLoadBalanceRequest.ProtoReflect.Descriptor instead. +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{1} +} + +func (x *InitialLoadBalanceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // See Server.load_balance_token. 
+ LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. + NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` +} + +func (x *ClientStatsPerToken) Reset() { + *x = ClientStatsPerToken{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsPerToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsPerToken) ProtoMessage() {} + +func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsPerToken.ProtoReflect.Descriptor instead. +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientStatsPerToken) GetLoadBalanceToken() string { + if x != nil { + return x.LoadBalanceToken + } + return "" +} + +func (x *ClientStatsPerToken) GetNumCalls() int64 { + if x != nil { + return x.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of generating the report. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. 
+ CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` +} + +func (x *ClientStats) Reset() { + *x = ClientStats{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStats) ProtoMessage() {} + +func (x *ClientStats) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStats.ProtoReflect.Descriptor instead. +func (*ClientStats) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientStats) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *ClientStats) GetNumCallsStarted() int64 { + if x != nil { + return x.NumCallsStarted + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinished() int64 { + if x != nil { + return x.NumCallsFinished + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if x != nil { + return x.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (x *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if x != nil { + return x.NumCallsFinishedKnownReceived + } + return 0 +} + +func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if x != nil { + return x.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to LoadBalanceResponseType: + // + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + // *LoadBalanceResponse_FallbackResponse + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` +} + +func (x *LoadBalanceResponse) Reset() { + *x = LoadBalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoadBalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoadBalanceResponse) ProtoMessage() {} + +func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LoadBalanceResponse.ProtoReflect.Descriptor instead. 
+func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4} +} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (x *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { + if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { + return x.FallbackResponse + } + return nil +} + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + // This message should be sent on the first response to the client. + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + // Contains the list of servers selected by the load balancer. The client + // should send requests to these servers in the specified order. + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +type LoadBalanceResponse_FallbackResponse struct { + // If this field is set, then the client should eagerly enter fallback + // mode (even if there are existing, healthy connections to backends). + FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +type FallbackResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FallbackResponse) Reset() { + *x = FallbackResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FallbackResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FallbackResponse) ProtoMessage() {} + +func (x *FallbackResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FallbackResponse.ProtoReflect.Descriptor instead. +func (*FallbackResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{5} +} + +type InitialLoadBalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This interval defines how often the client should send the client stats + // to the load balancer. 
Stats should only be reported when the duration is + // positive. + ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` +} + +func (x *InitialLoadBalanceResponse) Reset() { + *x = InitialLoadBalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitialLoadBalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitialLoadBalanceResponse) ProtoMessage() {} + +func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitialLoadBalanceResponse.ProtoReflect.Descriptor instead. +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{6} +} + +func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.Duration { + if x != nil { + return x.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` +} + +func (x *ServerList) Reset() { + *x = ServerList{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerList) ProtoMessage() {} + +func (x *ServerList) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerList.ProtoReflect.Descriptor instead. +func (*ServerList) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{7} +} + +func (x *ServerList) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. 
The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. + Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. +func (*Server) Descriptor() ([]byte, []int) { + return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{8} +} + +func (x *Server) GetIpAddress() []byte { + if x != nil { + return x.IpAddress + } + return nil +} + +func (x *Server) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *Server) GetLoadBalanceToken() string { + if x != nil { + return x.LoadBalanceToken + } + return "" +} + +func (x *Server) GetDrop() bool { + if x != nil { + return x.Drop + } + return false +} + +var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor + +var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, + 0x0a, 0x12, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 
0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x22, 0x2f, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x60, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x50, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x43, + 0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb0, 0x03, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2a, + 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x61, + 0x6c, 0x6c, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75, + 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, + 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x2d, 0x6e, 0x75, 0x6d, 0x5f, + 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x26, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, + 0x64, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x54, 0x6f, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x12, 0x58, 0x0a, 0x18, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, + 0x68, 0x65, 0x64, 0x5f, 
0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x15, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, + 0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x44, 0x72, 0x6f, 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x53, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, + 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, + 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x4b, 0x0a, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 0x66, 0x61, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x0a, 0x1a, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, + 0x0a, 0x1a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x1c, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x40, + 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x83, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x32, 0x62, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0b, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x57, 0x0a, 0x0d, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62, + 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once + file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc +) + +func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { + file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() { + file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData) + }) + return file_grpc_lb_v1_load_balancer_proto_rawDescData +} + +var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ + (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest + (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest + (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken + (*ClientStats)(nil), // 3: grpc.lb.v1.ClientStats + (*LoadBalanceResponse)(nil), // 4: grpc.lb.v1.LoadBalanceResponse + (*FallbackResponse)(nil), // 5: grpc.lb.v1.FallbackResponse + (*InitialLoadBalanceResponse)(nil), // 6: grpc.lb.v1.InitialLoadBalanceResponse + (*ServerList)(nil), // 7: grpc.lb.v1.ServerList + (*Server)(nil), // 8: grpc.lb.v1.Server + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 10: google.protobuf.Duration +} +var file_grpc_lb_v1_load_balancer_proto_depIdxs = []int32{ + 1, // 0: grpc.lb.v1.LoadBalanceRequest.initial_request:type_name -> grpc.lb.v1.InitialLoadBalanceRequest + 3, // 1: grpc.lb.v1.LoadBalanceRequest.client_stats:type_name -> 
grpc.lb.v1.ClientStats + 9, // 2: grpc.lb.v1.ClientStats.timestamp:type_name -> google.protobuf.Timestamp + 2, // 3: grpc.lb.v1.ClientStats.calls_finished_with_drop:type_name -> grpc.lb.v1.ClientStatsPerToken + 6, // 4: grpc.lb.v1.LoadBalanceResponse.initial_response:type_name -> grpc.lb.v1.InitialLoadBalanceResponse + 7, // 5: grpc.lb.v1.LoadBalanceResponse.server_list:type_name -> grpc.lb.v1.ServerList + 5, // 6: grpc.lb.v1.LoadBalanceResponse.fallback_response:type_name -> grpc.lb.v1.FallbackResponse + 10, // 7: grpc.lb.v1.InitialLoadBalanceResponse.client_stats_report_interval:type_name -> google.protobuf.Duration + 8, // 8: grpc.lb.v1.ServerList.servers:type_name -> grpc.lb.v1.Server + 0, // 9: grpc.lb.v1.LoadBalancer.BalanceLoad:input_type -> grpc.lb.v1.LoadBalanceRequest + 4, // 10: grpc.lb.v1.LoadBalancer.BalanceLoad:output_type -> grpc.lb.v1.LoadBalanceResponse + 10, // [10:11] is the sub-list for method output_type + 9, // [9:10] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_grpc_lb_v1_load_balancer_proto_init() } +func file_grpc_lb_v1_load_balancer_proto_init() { + if File_grpc_lb_v1_load_balancer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*LoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*InitialLoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ClientStatsPerToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ClientStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*LoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*FallbackResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*InitialLoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ServerList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Server); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + (*LoadBalanceResponse_FallbackResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_lb_v1_load_balancer_proto_goTypes, + DependencyIndexes: file_grpc_lb_v1_load_balancer_proto_depIdxs, + MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes, + }.Build() + File_grpc_lb_v1_load_balancer_proto = out.File + file_grpc_lb_v1_load_balancer_proto_rawDesc = nil + file_grpc_lb_v1_load_balancer_proto_goTypes = nil + file_grpc_lb_v1_load_balancer_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go new file mode 100644 index 000000000..84e6a2505 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -0,0 +1,134 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" +) + +// LoadBalancerClient is the client API for LoadBalancer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. 
+ BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) +} + +type loadBalancerClient struct { + cc grpc.ClientConnInterface +} + +func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] + +// LoadBalancerServer is the server API for LoadBalancer service. +// All implementations should embed UnimplementedLoadBalancerServer +// for forward compatibility. +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error +} + +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerServer struct{} + +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { + return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") +} +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} + +// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoadBalancerServer will +// result in compilation errors. +type UnsafeLoadBalancerServer interface { + mustEmbedUnimplementedLoadBalancerServer() +} + +func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&LoadBalancer_ServiceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] + +// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LoadBalancer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 000000000..c09876274 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,535 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/protobuf/types/known/durationpb" + + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" +) + +const ( + lbTokenKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") +var logger = grpclog.Component("grpclb") + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
+ if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) + dns.EnableSRVLookups = true +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. +func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a fixed scheme. This + // scheme will be used to dial to remote LB, so we can send filtered + // address updates to remote LB ClientConn using this manual resolver. + mr := manual.NewBuilderWithScheme("grpclb-internal") + // ResolveNow() on this manual resolver is forwarded to the parent + // ClientConn, so when grpclb client loses contact with the remote balancer, + // the parent ClientConn's resolver will re-resolve. + mr.ResolveNowCallback = cc.ResolveNow + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + dialTarget: opt.Target.Endpoint(), + target: opt.Target.Endpoint(), + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: mr, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + clientStats: newRPCStats(), + backoff: backoff.DefaultExponential, // TODO: make backoff configurable. + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb)) + + var err error + if opt.CredsBundle != nil { + lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) + if err != nil { + lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err) + } + lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) + if err != nil { + lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err) + } + } + + return lb +} + +type lbBalancer struct { + cc *lbCacheClientConn + dialTarget string // user's dial target + target string // same as dialTarget unless overridden in service config + opt balancer.BuildOptions + logger *internalgrpclog.PrefixLogger + + usePickFirst bool + + // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb + // servers. If it's nil, use the TransportCredentials from BuildOptions + // instead. 
+ grpclbClientConnCreds credentials.Bundle + // grpclbBackendCreds is the creds bundle to be used for addresses that are + // returned by grpclb server. If it's nil, don't set anything when creating + // SubConns. + grpclbBackendCreds credentials.Bundle + + fallbackTimeout time.Duration + doneCh chan struct{} + + // manualResolver is used in the remote LB ClientConn inside grpclb. When + // resolved address updates are received by grpclb, filtered updates will be + // send to remote LB ClientConn through this resolver. + manualResolver *manual.Resolver + // The ClientConn to talk to the remote balancer. + ccRemoteLB *remoteBalancerCCWrapper + // backoff for calling remote balancer. + backoff backoff.Strategy + + // Support client side load reporting. Each picker gets a reference to this, + // and will update its content. + clientStats *rpcStats + + mu sync.Mutex // guards everything following. + // The full server list including drops, used to check if the newly received + // serverList contains anything new. Each generate picker will also have + // reference to this list to do the first layer pick. + fullServerList []*lbpb.Server + // Backend addresses. It's kept so the addresses are available when + // switching between round_robin and pickfirst. + backendAddrs []resolver.Address + // All backends addresses, with metadata set to nil. This list contains all + // backend addresses in the same order and with the same duplicates as in + // serverlist. When generating picker, a SubConn slice with the same order + // but with only READY SCs will be generated. + backendAddrsWithoutMetadata []resolver.Address + // Roundrobin functionalities. + state connectivity.State + subConns map[resolver.Address]balancer.SubConn // Used to new/shutdown SubConn. + scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. + picker balancer.Picker + // Support fallback to resolved backend addresses if there's no response + // from remote balancer within fallbackTimeout. + remoteBalancerConnected bool + serverListReceived bool + inFallback bool + // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set + // when resolved address updates are received, and read in the goroutine + // handling fallback. + resolvedBackendAddrs []resolver.Address + connErr error // the last connection error +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker from +// it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) regeneratePicker(resetDrop bool) { + if lb.state == connectivity.TransientFailure { + lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)) + return + } + + if lb.state == connectivity.Connecting { + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) + return + } + + var readySCs []balancer.SubConn + if lb.usePickFirst { + for _, sc := range lb.subConns { + readySCs = append(readySCs, sc) + break + } + } else { + for _, a := range lb.backendAddrsWithoutMetadata { + if sc, ok := lb.subConns[a]; ok { + if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + } + } + + if len(readySCs) <= 0 { + // If there's no ready SubConns, always re-pick. This is to avoid drops + // unless at least one SubConn is ready. 
Otherwise we may drop more + // often than want because of drops + re-picks(which become re-drops). + // + // This doesn't seem to be necessary after the connecting check above. + // Kept for safety. + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) + return + } + if lb.inFallback { + lb.picker = newRRPicker(readySCs) + return + } + if resetDrop { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker, ok := lb.picker.(*lbPicker) + if !ok { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker.updateReadySCs(readySCs) +} + +// aggregateSubConnStats calculate the aggregated state of SubConns in +// lb.SubConns. These SubConns are subconns in use (when switching between +// fallback and grpclb). lb.scState contains states for all SubConns, including +// those in cache (SubConns are cached for 10 seconds after shutdown). +// +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. +func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { + var numConnecting uint64 + + for _, sc := range lb.subConns { + if state, ok := lb.scStates[sc]; ok { + switch state { + case connectivity.Ready: + return connectivity.Ready + case connectivity.Connecting, connectivity.Idle: + numConnecting++ + } + } + } + if numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} + +// UpdateSubConnState is unused; NewSubConn's options always specifies +// updateSubConnState as the listener. +func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) +} + +func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + s := scs.ConnectivityState + if lb.logger.V(2) { + lb.logger.Infof("SubConn state change: %p, %v", sc, s) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + if lb.logger.V(2) { + lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) + } + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError + } + // Force regenerate picker if + // - this sc became ready from not-ready + // - this sc became not-ready from ready + lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) + + // Enter fallback when the aggregated state is not Ready and the connection + // to remote balancer is lost. + if lb.state != connectivity.Ready { + if !lb.inFallback && !lb.remoteBalancerConnected { + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + } +} + +// updateStateAndPicker re-calculate the aggregated state, and regenerate picker +// if overall state is changed. 
+// +// If forceRegeneratePicker is true, picker will be regenerated. +func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + var cc balancer.ClientConn = lb.cc + if lb.usePickFirst { + // Bypass the caching layer that would wrap the picker. + cc = lb.cc.ClientConn + } + + cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + // grpclb uses the user's dial target to populate the `Name` field of the + // `InitialLoadBalanceRequest` message sent to the remote balancer. But when + // grpclb is used a child policy in the context of RLS, we want the `Name` + // field to be populated with the value received from the RLS server. To + // support this use case, an optional "target_name" field has been added to + // the grpclb LB policy's config. If specified, it overrides the name of + // the target to be sent to the remote balancer; if not, the target to be + // sent to the balancer will continue to be obtained from the target URI + // passed to the gRPC client channel. Whenever that target to be sent to the + // balancer is updated, we need to restart the stream to the balancer as + // this target is sent in the first message on the stream. + if gc != nil { + target := lb.dialTarget + if gc.ServiceName != "" { + target = gc.ServiceName + } + if target != lb.target { + lb.target = target + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.cancelRemoteBalancerCall() + } + } + } + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if lb.logger.V(2) { + lb.logger.Infof("Switching mode. Is pick_first used for backends? %v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) ResolverError(error) { + // Ignore resolver errors. GRPCLB is not selected unless the resolver + // works at least once. +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + if lb.logger.V(2) { + lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs)) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + backendAddrs := ccs.ResolverState.Addresses + + var remoteBalancerAddrs []resolver.Address + if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { + // Override any balancer addresses provided via + // ccs.ResolverState.Addresses. 
+ remoteBalancerAddrs = sd.BalancerAddresses + } + + if len(backendAddrs)+len(remoteBalancerAddrs) == 0 { + // There should be at least one address, either grpclb server or + // fallback. Empty address is not valid. + return balancer.ErrBadResolverState + } + + if len(remoteBalancerAddrs) == 0 { + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + lb.ccRemoteLB = nil + } + } else if lb.ccRemoteLB == nil { + // First time receiving resolved addresses, create a cc to remote + // balancers. + if err := lb.newRemoteBalancerCCWrapper(); err != nil { + return err + } + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + if lb.ccRemoteLB != nil { + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + } + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if len(remoteBalancerAddrs) == 0 || lb.inFallback { + // If there's no remote balancer address in ClientConn update, grpclb + // enters fallback mode immediately. + // + // If a new update is received while grpclb is in fallback, update the + // list of backends being used to the new fallback backends. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + return nil +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + } + lb.cc.close() +} + +func (lb *lbBalancer) ExitIdle() {} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 000000000..96a57c8c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = pickfirst.Name +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage + ServiceName string +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 000000000..671bc663f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,193 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "math/rand" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map +// instead of a slice. +type rpcStats struct { + // Only access the following fields atomically. + numCallsStarted int64 + numCallsFinished int64 + numCallsFinishedWithClientFailedToSend int64 + numCallsFinishedKnownReceived int64 + + mu sync.Mutex + // map load_balance_token -> num_calls_dropped + numCallsDropped map[string]int64 +} + +func newRPCStats() *rpcStats { + return &rpcStats{ + numCallsDropped: make(map[string]int64), + } +} + +func isZeroStats(stats *lbpb.ClientStats) bool { + return len(stats.CallsFinishedWithDrop) == 0 && + stats.NumCallsStarted == 0 && + stats.NumCallsFinished == 0 && + stats.NumCallsFinishedWithClientFailedToSend == 0 && + stats.NumCallsFinishedKnownReceived == 0 +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. 
+func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), + } + s.mu.Lock() + dropped := s.numCallsDropped + s.numCallsDropped = make(map[string]int64) + s.mu.Unlock() + for token, count := range dropped { + stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ + LoadBalanceToken: token, + NumCalls: count, + }) + } + return stats +} + +func (s *rpcStats) drop(token string) { + atomic.AddInt64(&s.numCallsStarted, 1) + s.mu.Lock() + s.numCallsDropped[token]++ + s.mu.Unlock() + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int +} + +func newRRPicker(readySCs []balancer.SubConn) *rrPicker { + return &rrPicker{ + subConns: readySCs, + subConnsNext: rand.Intn(len(readySCs)), + } +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return balancer.PickResult{SubConn: sc}, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. +// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: rand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. + if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. 
+ if len(p.subConns) <= 0 { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 000000000..506fae0d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "context" + "fmt" + "io" + "net" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/backoff" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" +) + +func serverListEqual(a, b []*lbpb.Server) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !proto.Equal(a[i], b[i]) { + return false + } + } + return true +} + +// processServerList updates balancer's internal state, create/remove SubConns +// and regenerates picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + if lb.logger.V(2) { + lb.logger.Infof("Processing server list: %#v", l) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if serverListEqual(lb.fullServerList, l.Servers) { + if lb.logger.V(2) { + lb.logger.Infof("Ignoring new server list as it is the same as the previous one") + } + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for i, s := range l.Servers { + if s.Drop { + continue + } + + md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. 
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) + if lb.logger.V(2) { + lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) + } + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. If we are in fallback, + // this is also exiting fallback. + lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) +} + +// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes +// balancer state and picker. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { + opts := balancer.NewSubConnOptions{} + if !fallback { + opts.CredsBundle = lb.grpclbBackendCreds + } + + lb.backendAddrs = backendAddrs + lb.backendAddrsWithoutMetadata = nil + + fallbackModeChanged := lb.inFallback != fallback + lb.inFallback = fallback + if fallbackModeChanged && lb.inFallback { + // Clear previous received list when entering fallback, so if the server + // comes back and sends the same list again, the new addresses will be + // used. + lb.fullServerList = nil + } + + balancingPolicyChanged := lb.usePickFirst != pickFirst + lb.usePickFirst = pickFirst + + if fallbackModeChanged || balancingPolicyChanged { + // Remove all SubConns when switching balancing policy or switching + // fallback mode. + // + // For fallback mode switching with pickfirst, we want to recreate the + // SubConn because the creds could be different. + for a, sc := range lb.subConns { + sc.Shutdown() + delete(lb.subConns, a) + } + } + + if lb.usePickFirst { + var ( + scKey resolver.Address + sc balancer.SubConn + ) + for scKey, sc = range lb.subConns { + break + } + if sc != nil { + if len(backendAddrs) == 0 { + sc.Shutdown() + delete(lb.subConns, scKey) + return + } + lb.cc.ClientConn.UpdateAddresses(sc, backendAddrs) + sc.Connect() + return + } + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } + // This bypasses the cc wrapper with SubConn cache. + sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) + if err != nil { + lb.logger.Warningf("Failed to create new SubConn: %v", err) + return + } + sc.Connect() + lb.subConns[backendAddrs[0]] = sc + lb.scStates[sc] = connectivity.Idle + return + } + + // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick + // lookup for an address. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutAttrs := addr + addrWithoutAttrs.Attributes = nil + addrsSet[addrWithoutAttrs] = struct{}{} + lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutAttrs) + + if _, ok := lb.subConns[addrWithoutAttrs]; !ok { + // Use addrWithMD to create the SubConn. + var sc balancer.SubConn + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + lb.logger.Warningf("Failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. 
+ lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + sc.Shutdown() + delete(lb.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + + // Regenerate and update picker after refreshing subconns because with + // cache, even if SubConn was newed/removed, there might be no state + // changes (the subconn will be kept in cache, not actually + // newed/removed). + lb.updateStateAndPicker(true, true) +} + +type remoteBalancerCCWrapper struct { + cc *grpc.ClientConn + lb *lbBalancer + backoff backoff.Strategy + done chan struct{} + + streamMu sync.Mutex + streamCancel func() + + // waitgroup to wait for all goroutines to exit. + wg sync.WaitGroup +} + +func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { + var dopts []grpc.DialOption + if creds := lb.opt.DialCreds; creds != nil { + dopts = append(dopts, grpc.WithTransportCredentials(creds)) + } else if bundle := lb.grpclbClientConnCreds; bundle != nil { + dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) + } else { + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + if lb.opt.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) + } + if lb.opt.CustomUserAgent != "" { + dopts = append(dopts, grpc.WithUserAgent(lb.opt.CustomUserAgent)) + } + // Explicitly set pickfirst as the balancer. + dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) + dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParent)) + + // Enable Keepalive for grpclb client. + dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 20 * time.Second, + Timeout: 10 * time.Second, + PermitWithoutStream: true, + })) + + // The dial target is not important. + // + // The grpclb server addresses will set field ServerName, and creds will + // receive ServerName as authority. + target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn" + cc, err := grpc.Dial(target, dopts...) + if err != nil { + return fmt.Errorf("grpc.Dial(%s): %v", target, err) + } + ccw := &remoteBalancerCCWrapper{ + cc: cc, + lb: lb, + backoff: lb.backoff, + done: make(chan struct{}), + } + lb.ccRemoteLB = ccw + ccw.wg.Add(1) + go ccw.watchRemoteBalancer() + return nil +} + +// close closed the ClientConn to remote balancer, and waits until all +// goroutines to finish. 
+func (ccw *remoteBalancerCCWrapper) close() { + close(ccw.done) + ccw.cc.Close() + ccw.wg.Wait() +} + +func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { + for { + reply, err := s.Recv() + if err != nil { + if err == io.EOF { + return errServerTerminatedConnection + } + return fmt.Errorf("grpclb: failed to recv server list: %v", err) + } + if serverList := reply.GetServerList(); serverList != nil { + ccw.lb.processServerList(serverList) + } + if reply.GetFallbackResponse() != nil { + // Eagerly enter fallback + ccw.lb.mu.Lock() + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + ccw.lb.mu.Unlock() + } + } +} + +func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + lastZero := false + for { + select { + case <-ticker.C: + case <-s.Context().Done(): + return + } + stats := ccw.lb.clientStats.toClientStats() + zero := isZeroStats(stats) + if zero && lastZero { + // Quash redundant empty load reports. + continue + } + lastZero = zero + t := time.Now() + stats.Timestamp = ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: stats, + }, + }); err != nil { + return + } + } +} + +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { + lbClient := &loadBalancerClient{cc: ccw.cc} + stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) + if err != nil { + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err) + } + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = true + ccw.lb.mu.Unlock() + + // grpclb handshake on the stream. + initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: ccw.lb.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + return true, fmt.Errorf("grpclb: failed to send init request: %v", err) + } + reply, err := stream.Recv() + if err != nil { + return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) + } + initResp := reply.GetInitialResponse() + if initResp == nil { + return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") + } + + ccw.wg.Add(1) + go func() { + defer ccw.wg.Done() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + ccw.sendLoadReport(stream, d) + } + }() + // No backoff if init req/resp handshake was successful. + return false, ccw.readServerList(stream) +} + +// cancelRemoteBalancerCall cancels the context used by the stream to the remote +// balancer. watchRemoteBalancer() takes care of restarting this call after the +// stream fails. +func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() +} + +func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { + defer func() { + ccw.wg.Done() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + // This is to make sure that we don't leak the context when we are + // directly returning from inside of the below `for` loop. 
+ ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() + }() + + var retryCount int + var ctx context.Context + for { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ctx, ccw.streamCancel = context.WithCancel(context.Background()) + ccw.streamMu.Unlock() + + doBackoff, err := ccw.callRemoteBalancer(ctx) + select { + case <-ccw.done: + return + default: + if err != nil { + if err == errServerTerminatedConnection { + ccw.lb.logger.Infof("Call to remote balancer failed: %v", err) + } else { + ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err) + } + } + } + // Trigger a re-resolve when the stream errors. + ccw.lb.cc.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = false + ccw.lb.fullServerList = nil + // Enter fallback when connection to remote balancer is lost, and the + // aggregated state is not Ready. + if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { + // Entering fallback. + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + } + ccw.lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff + select { + case <-timer.C: + case <-ccw.done: + timer.Stop() + return + } + retryCount++ + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 000000000..c0f762c0c --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being shut down. +// +// Its NewSubconn and SubConn.Shutdown methods are updated to do cache first. +type lbCacheClientConn struct { + balancer.ClientConn + + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. 
+ subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + ClientConn: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutAttrs := addrs[0] + addrWithoutAttrs.Attributes = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutAttrs]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. + entry.cancel() + delete(ccc.subConnCache, addrWithoutAttrs) + return entry.sc, nil + } + + scNew, err := ccc.ClientConn.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + scNew = &lbCacheSubConn{SubConn: scNew, ccc: ccc} + + ccc.subConnToAddr[scNew] = addrWithoutAttrs + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +type lbCacheSubConn struct { + balancer.SubConn + ccc *lbCacheClientConn +} + +func (sc *lbCacheSubConn) Shutdown() { + ccc := sc.ccc + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for + // the same address, and those SubConns are all shut down. We + // remove sc immediately here. + delete(ccc.subConnToAddr, sc) + sc.SubConn.Shutdown() + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry.abortDeleting { + return + } + sc.SubConn.Shutdown() + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { + s.Picker = &lbCachePicker{Picker: s.Picker} + ccc.ClientConn.UpdateState(s) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + defer ccc.mu.Unlock() + // Only cancel all existing timers. There's no need to shut down SubConns. 
+ for _, entry := range ccc.subConnCache { + entry.cancel() + } +} + +type lbCachePicker struct { + balancer.Picker +} + +func (cp *lbCachePicker) Pick(i balancer.PickInfo) (balancer.PickResult, error) { + res, err := cp.Picker.Pick(i) + if err != nil { + return res, err + } + res.SubConn = res.SubConn.(*lbCacheSubConn).SubConn + return res, nil +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..4ecfa1c21 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go new file mode 100644 index 000000000..4d69b4052 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -0,0 +1,283 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirst contains the pick_first load balancing policy. 
+package pickfirst + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + +const ( + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (pickfirstBuilder) Name() string { + return Name +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // We don't have to guard this block with the env var because ParseConfig + // already does so. 
+ cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } + b.state = state.ConnectivityState +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 29f7a4ddd..260255d31 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,22 +22,22 @@ package roundrobin import ( - "context" - "sync" + "math/rand" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/resolver" ) // Name is the name of round_robin balancer. const Name = "round_robin" +var logger = grpclog.Component("roundrobin") + // newBuilder creates a new roundrobin balancer builder. 
func newBuilder() balancer.Builder { - return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -46,13 +46,13 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - if len(readySCs) == 0 { +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn - for _, sc := range readySCs { + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { scs = append(scs, sc) } return &rrPicker{ @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) ba // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(rand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } -func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() - return sc, nil, nil +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index 8df4095ca..000000000 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,318 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State -} - -// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. -// TODO make a general purpose buffer that uses interface{}. 
-type scStateUpdateBuffer struct { - c chan *scStateUpdate - mu sync.Mutex - backlog []*scStateUpdate -} - -func newSCStateUpdateBuffer() *scStateUpdateBuffer { - return &scStateUpdateBuffer{ - c: make(chan *scStateUpdate, 1), - } -} - -func (b *scStateUpdateBuffer) put(t *scStateUpdate) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- t: - return - default: - } - } - b.backlog = append(b.backlog, t) -} - -func (b *scStateUpdateBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that the scStateUpdate will be sent to. -// -// Upon receiving, the caller should call load to send another -// scStateChangeTuple onto the channel if there is any. -func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { - return b.c -} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancer balancer.Balancer - stateChangeQueue *scStateUpdateBuffer - ccUpdateCh chan *balancer.ClientConnState - done chan struct{} - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} -} - -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { - ccb := &ccBalancerWrapper{ - cc: cc, - stateChangeQueue: newSCStateUpdateBuffer(), - ccUpdateCh: make(chan *balancer.ClientConnState, 1), - done: make(chan struct{}), - subConns: make(map[*acBalancerWrapper]struct{}), - } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - return ccb -} - -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.stateChangeQueue.get(): - ccb.stateChangeQueue.load() - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) - } else { - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - } - case s := <-ccb.ccUpdateCh: - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateClientConnState(*s) - } else { - ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) - } - case <-ccb.done: - } - - select { - case <-ccb.done: - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - ccb.UpdateBalancerState(connectivity.Connecting, nil) - return - default: - } - ccb.cc.firstResolveEvent.Fire() - } -} - -func (ccb *ccBalancerWrapper) close() { - close(ccb.done) -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. 
- if sc == nil { - return - } - ccb.stateChangeQueue.put(&scStateUpdate{ - sc: sc, - state: s, - }) -} - -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { - if ccb.cc.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - s := &ccs.ResolverState - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } - select { - case <-ccb.ccUpdateCh: - default: - } - ccb.ccUpdateCh <- ccs -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } - ac, err := ccb.cc.newAddrConn(addrs, opts) - if err != nil { - return nil, err - } - acbw := &acBalancerWrapper{ac: ac} - acbw.ac.mu.Lock() - ac.acbw = acbw - acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) -} - -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { - ccb.cc.resolveNow(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. 
- acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) - - if acState == connectivity.Shutdown { - return - } - - ac, err := cc.newAddrConn(addrs, opts) - if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect() - } - } -} - -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index 66e9a44ac..000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,334 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. -} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: opts.Target.Endpoint, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateBalancerState(connectivity.Idle, bw) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. 
-func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateBalancerState(bw.state, bw) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return nil, nil, err - } - if p != nil { - done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() - } - - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, sc := range bw.conns { - return sc, done, nil - } - return nil, nil, balancer.ErrNoSubConnAvailable - } - sc, ok1 := bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[sc] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return nil, nil, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return sc, done, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return nil, nil, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. 
- return nil, nil, balancer.ErrNoSubConnAvailable - } -} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 000000000..8ad6ce2f0 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,365 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the updateClientConnState() method +// is invoked. 
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + errCh := make(chan error) + uccs := func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + name := gracefulswitch.ChildName(ccs.BalancerConfig) + if ccb.curBalancerName != name { + ccb.curBalancerName = name + channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name) + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err + } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) + return <-errCh +} + +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ResolverError(err) + }) +} + +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. +func (ccb *ccBalancerWrapper) close() { + ccb.mu.Lock() + ccb.closed = true + ccb.mu.Unlock() + channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") + ccb.serializer.TrySchedule(func(context.Context) { + if ccb.balancer == nil { + return + } + ccb.balancer.Close() + ccb.balancer = nil + }) + ccb.serializerCancel() +} + +// exitIdle invokes the balancer's exitIdle method in the serializer. 
+func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") + } + ccb.mu.Unlock() + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) + if err != nil { + channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + return nil, err + } + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } + ac.acbw = acbw + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. 
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } + }) +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index f393bb661..55bffaa77 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,24 +1,44 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -54,32 +74,55 @@ const ( GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 ) -var GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", -} -var GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, +// Enum value maps for GrpcLogEntry_EventType. 
+var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p } func (x GrpcLogEntry_EventType) String() string { - return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} } // Enumerates the entity that generates the log entry @@ -91,22 +134,45 @@ const ( GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 ) -var GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", -} -var GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p } func (x GrpcLogEntry_Logger) String() string { - return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. 
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} } type Address_Type int32 @@ -122,30 +188,57 @@ const ( Address_TYPE_UNIX Address_Type = 3 ) -var Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", -} -var Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p } func (x Address_Type) String() string { - return proto.EnumName(Address_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } + +// Deprecated: Use Address_Type.Descriptor instead. func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} } // Log entry we store in binary logs type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. @@ -158,11 +251,12 @@ type GrpcLogEntry struct { // durability or ordering is not guaranteed. SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are valid to be assigned to Payload: + // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -175,99 +269,76 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` } -func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } -func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } -func (*GrpcLogEntry) ProtoMessage() {} -func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} -} -func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) -} -func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) -} -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GrpcLogEntry) XXX_Size() int { - return xxx_messageInfo_GrpcLogEntry.Size(m) + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GrpcLogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} +} -func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func (m *GrpcLogEntry) GetCallId() uint64 { - if m != nil { - return m.CallId +func (x *GrpcLogEntry) GetCallId() uint64 { + if x != nil { + return x.CallId } return 0 } -func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if m != nil { - return m.SequenceIdWithinCall +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if x != nil { + return x.SequenceIdWithinCall } return 0 } -func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if m != nil { - return m.Type +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if x != nil { + return x.Type } return GrpcLogEntry_EVENT_TYPE_UNKNOWN } -func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if m != nil { - return m.Logger +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if x != nil { + return x.Logger } return GrpcLogEntry_LOGGER_UNKNOWN } -type isGrpcLogEntry_Payload interface { - isGrpcLogEntry_Payload() -} - -type GrpcLogEntry_ClientHeader struct { - ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` -} - -type GrpcLogEntry_ServerHeader struct { - ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` -} - -type GrpcLogEntry_Message struct { - Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` -} - -type GrpcLogEntry_Trailer struct { - Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` -} - -func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} - func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { if m != nil { return m.Payload @@ -275,161 +346,82 @@ func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { return nil } -func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { return x.ClientHeader } return nil } -func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { return x.ServerHeader } return nil } -func (m *GrpcLogEntry) GetMessage() *Message { - if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { +func (x *GrpcLogEntry) GetMessage() *Message { + if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { return x.Message } return nil } -func (m *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { +func (x *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { return x.Trailer } return nil } -func (m *GrpcLogEntry) GetPayloadTruncated() bool { - if m != nil { - return m.PayloadTruncated +func (x *GrpcLogEntry) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated } return false } -func (m *GrpcLogEntry) GetPeer() *Address { - if m != nil { - return 
m.Peer +func (x *GrpcLogEntry) GetPeer() *Address { + if x != nil { + return x.Peer } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() } -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` } -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n 
+type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` } +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + type ClientHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -438,109 +430,127 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } -func (m *ClientHeader) Reset() { *m = ClientHeader{} } -func (m *ClientHeader) String() string { return proto.CompactTextString(m) } -func (*ClientHeader) ProtoMessage() {} -func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} -} -func (m *ClientHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientHeader.Unmarshal(m, b) -} -func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) -} -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ClientHeader) XXX_Size() int { - return xxx_messageInfo_ClientHeader.Size(m) + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ClientHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ClientHeader.DiscardUnknown(m) + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ClientHeader proto.InternalMessageInfo +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. 
+func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} -func (m *ClientHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *ClientHeader) GetMethodName() string { - if m != nil { - return m.MethodName +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName } return "" } -func (m *ClientHeader) GetAuthority() string { - if m != nil { - return m.Authority +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority } return "" } -func (m *ClientHeader) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout } return nil } type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (m *ServerHeader) Reset() { *m = ServerHeader{} } -func (m *ServerHeader) String() string { return proto.CompactTextString(m) } -func (*ServerHeader) ProtoMessage() {} -func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} -} -func (m *ServerHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerHeader.Unmarshal(m, b) -} -func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) -} -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ServerHeader) XXX_Size() int { - return xxx_messageInfo_ServerHeader.Size(m) + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ServerHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ServerHeader.DiscardUnknown(m) + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ServerHeader proto.InternalMessageInfo +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} -func (m *ServerHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. 
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -550,110 +560,124 @@ type Trailer struct { StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. - StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` } -func (m *Trailer) Reset() { *m = Trailer{} } -func (m *Trailer) String() string { return proto.CompactTextString(m) } -func (*Trailer) ProtoMessage() {} -func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} -} -func (m *Trailer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Trailer.Unmarshal(m, b) -} -func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) -} -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Trailer) XXX_Size() int { - return xxx_messageInfo_Trailer.Size(m) + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Trailer) XXX_DiscardUnknown() { - xxx_messageInfo_Trailer.DiscardUnknown(m) + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Trailer proto.InternalMessageInfo +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} -func (m *Trailer) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *Trailer) GetStatusCode() uint32 { - if m != nil { - return m.StatusCode +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode } return 0 } -func (m *Trailer) GetStatusMessage() string { - if m != nil { - return m.StatusMessage +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage } return "" } -func (m *Trailer) GetStatusDetails() []byte { - if m != nil { - return m.StatusDetails +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails } return nil } // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. 
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Message proto.InternalMessageInfo +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} -func (m *Message) GetLength() uint32 { - if m != nil { - return m.Length +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length } return 0 } -func (m *Message) GetData() []byte { - if m != nil { - return m.Data +func (x *Message) GetData() []byte { + if x != nil { + return x.Data } return nil } @@ -666,12 +690,12 @@ func (m *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because @@ -680,221 +704,480 @@ func (m *Message) GetData() []byte { // header is just a normal metadata key. 
// The pair will not count towards the size limit. type Metadata struct { - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` } -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Metadata proto.InternalMessageInfo +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} -func (m *Metadata) GetEntry() []*MetadataEntry { - if m != nil { - return m.Entry +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry } return nil } // A metadata key value pair type MetadataEntry struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } -func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } -func (*MetadataEntry) ProtoMessage() {} -func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} -} -func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) -} -func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetadataEntry) XXX_Size() int { - return xxx_messageInfo_MetadataEntry.Size(m) + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetadataEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MetadataEntry.DiscardUnknown(m) + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} -func (m *MetadataEntry) GetKey() string { - if m != nil { - return m.Key +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *MetadataEntry) GetValue() []byte { - if m != nil { - return m.Value +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value } return nil } // Address information type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` } -func (m *Address) Reset() { *m = Address{} } -func (m *Address) String() string { return proto.CompactTextString(m) } -func (*Address) ProtoMessage() {} -func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} -} -func (m *Address) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Address.Unmarshal(m, b) -} -func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Address.Marshal(b, m, deterministic) -} -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Address) XXX_Size() int { - return xxx_messageInfo_Address.Size(m) + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Address) XXX_DiscardUnknown() { - xxx_messageInfo_Address.DiscardUnknown(m) + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Address proto.InternalMessageInfo +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
+func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} -func (m *Address) GetType() Address_Type { - if m != nil { - return m.Type +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type } return Address_TYPE_UNKNOWN } -func (m *Address) GetAddress() string { - if m != nil { - return m.Address +func (x *Address) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Address) GetIpPort() uint32 { - if m != nil { - return m.IpPort +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort } return 0 } -func init() { - proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") - proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") - proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") - proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") - proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") - proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") - proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") - proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) -} - -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} - -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 
0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 
0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 
0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: 
grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Address); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d38..788c89c16 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,7 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..32b7fa579 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a7643df7d..9c8850e3f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,36 +23,39 @@ import ( "errors" "fmt" "math" - "net" - "reflect" + "net/url" + "slices" "strings" "sync" "sync/atomic" "time" "google.golang.org/grpc/balancer" - _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. - _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -66,11 +69,14 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") + // errConnIdling indicates the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -78,17 +84,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -99,114 +105,149 @@ const ( defaultReadBufSize = 32 * 1024 ) -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) +type defaultConfigSelector struct { + sc *ServiceConfig } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + +// NewClient creates a new gRPC "channel" for the target URI provided. No I/O +// is performed. Use of the ClientConn for RPCs will automatically cause it to +// connect. Connect may be used to manually create a connection, but for most +// users this is unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. 
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns +// resolver, a "dns:///" prefix should be applied to the target. +// +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. +func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + // Apply dial options. + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } + for _, opt := range opts { opt.apply(&cc.dopts) } + // Determine the resolver to use. + if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtINFO, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } - cc.csMgr.channelzID = cc.channelzID + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } + cc.mkp = cc.dopts.copts.KeepaliveParams - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err = cc.initAuthority(); err != nil { + return nil, err 
} - if cc.dopts.defaultServiceConfigRawJSON != nil { - sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. + cc.channelzRegistration(target) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + + return cc, nil +} + +// Dial calls DialContext(context.Background(), target, opts...). +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is +// used, it calls Connect and WaitForStateChange until either the context +// expires or the state of the ClientConn is Ready. +// +// One subtle difference between NewClient and Dial and DialContext is that the +// former uses "dns" as the default name resolver, while the latter use +// "passthrough" for backward compatibility. This distinction should not matter +// to most users, but could matter to legacy users that specify a custom dialer +// and expect it to receive the target string directly. +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + // At the end of this method, we kick the channel out of idle, rather than + // waiting for the first rpc. + opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + cc, err := NewClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. This is the legacy behavior of + // Dial. + defer func() { if err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) + cc.Close() } - cc.dopts.defaultServiceConfig = sc - } - cc.mkp = cc.dopts.copts.KeepaliveParams + }() - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err } - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Return now for non-blocking dials. 
+ if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -217,118 +258,186 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * defer func() { select { case <-ctx.Done(): - conn, err = nil, ctx.Err() + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } default: } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - scSet = true - } - default: - } - } - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.Exponential{ - MaxDelay: DefaultBackoffConfig.MaxDelay, + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() } - } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} - } - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint - } - - if cc.dopts.scChan != nil && !scSet { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err } - case <-ctx.Done(): return nil, ctx.Err() } } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } +} - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. 
+func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEvent{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParent != nil { + ted.Parent = &channelz.TraceEvent{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg), + Severity: channelz.CtInfo, + } } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + channelz.AddTraceEvent(logger, cc.channelz, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing } + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { + return err } + cc.addTraceEvent("exiting idle mode") + return nil +} + +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() - cc.resolverWrapper = rWrapper + + if cc.conns == nil { + cc.mu.Unlock() + return + } + + conns := cc.conns + + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() + cc.csMgr.updateState(connectivity.Idle) + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - return nil, ctx.Err() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. 
+ for ac := range conns { + ac.tearDown(errConnIdling) + } +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelz`. A channelz trace event is +// emitted for ClientConn creation. If the newly created ClientConn is a nested +// one, i.e a valid parent ClientConn ID is specified via a dial option, the +// trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) + cc.channelz = channelz.RegisterChannel(parentChannel, target) + cc.addTraceEvent("created") } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -345,7 +454,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -357,7 +466,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -393,13 +502,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified channel. 
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager { + return &connectivityStateManager{ + channelz: channel, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelz *channelz.Channel + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -415,12 +538,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + csm.channelz.ChannelMetrics.State.Store(&state) + csm.pubSub.Publish(state) + + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -443,39 +564,74 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { return csm.notifyChan } -// ClientConn represents a client connection to an RPC server. -type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). 
+ dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + pickerWrapper *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + retryThrottler atomic.Value // Updated from service config. + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// This is an EXPERIMENTAL API. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -490,27 +646,28 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// This is an EXPERIMENTAL API. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return } + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. 
+ cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -532,149 +689,167 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { } } -func (cc *ClientConn) updateResolverState(s resolver.State) error { - cc.mu.Lock() - defer cc.mu.Unlock() +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) + } +} + +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. if cc.conns == nil { + cc.mu.Unlock() return nil } - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { - if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { - cc.applyServiceConfig(cc.dopts.defaultServiceConfig) - } - } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { - cc.applyServiceConfig(sc) + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig() + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState } - var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - balCfg = cc.sc.lbConfig.cfg - } else { - var isGRPCLB bool - for _, a := range s.Addresses { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig() + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? 
+ } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB } else { - newBalancerName = PickFirstBalancerName + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLBLocked(s.ServiceConfig) + cc.mu.Unlock() + return ret } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) - return nil -} - -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. -// -// Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig } + bw := cc.balancerWrapper + cc.mu.Unlock() - grpclog.Infof("ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. } + return ret +} - builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } - if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +// applyFailingLBLocked is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. 
A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s) - cc.mu.Unlock() +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + copy(out, in) + return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { - ac := &addrConn{ - cc: cc, - addrs: addrs, - scopts: opts, - dopts: cc.dopts, - czData: new(channelzData), - resetBackoff: make(chan struct{}), - } - ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) + + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) + + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID), + Severity: channelz.CtInfo, + }, + }) + + // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -691,34 +866,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - // Target returns the target string of the ClientConn. -// This is an EXPERIMENTAL API. func (cc *ClientConn) Target() string { return cc.target } +// CanonicalTarget returns the canonical target string of the ClientConn. +func (cc *ClientConn) CanonicalTarget() string { + return cc.parsedTarget.String() +} + func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.channelz.ChannelMetrics.CallsStarted.Add(1) + cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.channelz.ChannelMetrics.CallsSucceeded.Add(1) } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.channelz.ChannelMetrics.CallsFailed.Add(1) } // connect starts creating a transport. @@ -727,89 +895,146 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting) - ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. 
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + addrs = copyAddresses(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + ac.mu.Lock() - defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if ac.state == connectivity.Connecting { - return false + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransportAndUnlock() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. 
+func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] } // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. -// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the service, we return it. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. // Otherwise, we return an empty MethodConfig. func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m = cc.sc.Methods[method[:i+1]] - } - return m + return getMethodConfig(cc.sc, method) } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -821,22 +1046,22 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } -func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. 
- return fmt.Errorf("got nil pointer for service config") + return } cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ @@ -849,18 +1074,16 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } - - return nil } -func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -872,62 +1095,62 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() - defer cc.mu.Unlock() - for ac := range cc.conns { + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { ac.resetConnectBackoff() } } // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil - bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() - cc.blockingpicker.close() + cc.resolverWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + cc.pickerWrapper.close() + cc.balancerWrapper.close() - if rWrapper != nil { - rWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtINFO, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtINFO, - } - } - channelz.AddTraceEvent(cc.channelzID, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) - } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. 
+ channelz.RemoveEntry(cc.channelz.ID) + return nil } @@ -938,7 +1161,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -947,35 +1170,37 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. - czData *channelzData + channelz *channelz.SubChannel } // Note: this requires a lock on ac.mu. -func (ac *addrConn) updateConnectivityState(s connectivity.State) { +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) + ac.channelz.ChannelMetrics.State.Store(&s) + if lastErr == nil { + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -992,113 +1217,92 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) - } +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. 
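Illustrative aside, not part of the vendored diff: the per-attempt deadline computed above is the larger of the backoff delay and the minimum connect timeout, both of which come from the dial options. A sketch of how a caller would tune those knobs through the public API; the target is a placeholder and insecure credentials are used only to keep the example self-contained.

```
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// ConnectParams feeds the fields read in the hunk above: Backoff becomes
	// the per-subchannel backoff strategy and MinConnectTimeout the floor on
	// each dial attempt; the attempt deadline is the larger of the two.
	conn, err := grpc.NewClient("dns:///example.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second,
			},
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}
```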
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() - if ac.state == connectivity.Shutdown { + if acCtx.Err() != nil { + // addrConn was torn down. ac.mu.Unlock() return } + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure) - - // Backoff. - b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. 
It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) + if ctx.Err() != nil { + return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1110,102 +1314,103 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil + } + if firstConnErr == nil { + firstConnErr = err } - ac.cc.blockingpicker.updateConnectionError(err) + ac.cc.updateConnectionError(err) } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: ac.cc.authority, - } - - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. 
- ac.updateConnectivityState(connectivity.Connecting) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err } - select { - case <-time.After(connectDeadline.Sub(time.Now())): - // We didn't get the preface in time. - newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. 
+ // + // This can also happen when updateAddrs is called during a connection + // attempt. + go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil } - return newTr, reconnect, nil + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1213,7 +1418,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // // LB channel health checking is enabled when all requirements below are met: // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption -// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 2. internal.HealthCheckFunc is set by importing the grpc/health package // 3. a service config with non-empty healthCheckConfig field is provided // 4. the load balancer requests it // @@ -1224,7 +1429,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { var healthcheckManagingState bool defer func() { if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) + ac.updateConnectivityState(connectivity.Ready, nil) } }() @@ -1243,7 +1448,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. - grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") return } @@ -1251,7 +1456,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1260,28 +1465,22 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { ac.mu.Unlock() return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) } - setConnectivityState := func(s connectivity.State) { + setConnectivityState := func(s connectivity.State, lastErr error) { ac.mu.Lock() defer ac.mu.Unlock() if ac.transport != currentTr { return } - ac.updateConnectivityState(s) + ac.updateConnectivityState(s, lastErr) } // Start the health checking stream. 
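Illustrative aside, not part of the vendored diff: the health-check requirements listed above are driven from the client side. A sketch of a client that satisfies requirements 2 and 3 by importing the health package and supplying a `healthCheckConfig` in the default service config; whether the stream actually runs still depends on the LB policy (requirement 4). Target and credentials are placeholders.

```
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // sets internal.HealthCheckFunc (requirement 2)
)

func main() {
	// The healthCheckConfig entry satisfies requirement 3; an empty
	// serviceName asks the server to report its overall health.
	conn, err := grpc.NewClient("dns:///example.internal:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`),
	)
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}
```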
go func() { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) } } }() @@ -1295,33 +1494,43 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateReadyChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } } - return nil, false + return nil, status.FromContextError(ctx.Err()).Err() } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1331,67 +1540,47 @@ func (ac *addrConn) tearDown(err error) { curTr := ac.transport ac.transport = nil // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancelation / etc. - ac.updateConnectivityState(connectivity.Shutdown) + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) 
may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } - ac.mu.Unlock() -} - -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelz.ID) ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancellation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. 
+ curTr.Close(err) + } + } } type retryThrottler struct { @@ -1431,12 +1620,17 @@ func (rt *retryThrottler) successfulRPC() { } } -type channelzChannel struct { - cc *ClientConn +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) } -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) } // ErrClientConnTimeout indicates that the ClientConn cannot establish the @@ -1445,3 +1639,189 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // Deprecated: This error is never returned by grpc and should not be // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err == nil { + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + return err + } + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
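Illustrative aside, not part of the vendored diff: `parseTarget` above is essentially `url.Parse`, and the resulting scheme is matched against the registered resolver builders, with unrecognized schemes falling back to the configured default scheme. A small standalone illustration of how a few common dial targets parse; the targets themselves are placeholders.

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	targets := []string{
		"dns:///example.com:443", // explicit resolver scheme
		"unix:///tmp/grpc.sock",  // unix socket resolver
		"localhost:50051",        // no registered scheme: default-scheme fallback
	}
	for _, t := range targets {
		u, err := url.Parse(t)
		if err != nil {
			fmt.Printf("%s: %v\n", t, err)
			continue
		}
		fmt.Printf("%-24s scheme=%q host=%q path=%q opaque=%q\n",
			t, u.Scheme, u.Host, u.Path, u.Opaque)
	}
}
```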
+func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{URL: *u}, nil +} + +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimiters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) initAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. 
+ // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + if authorityFromDialOption != "" { + cc.authority = authorityFromDialOption + } else if authorityFromCreds != "" { + cc.authority = authorityFromCreds + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { + cc.authority = "localhost" + endpoint + } else { + cc.authority = encodeAuthority(endpoint) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 129776547..e840858b7 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. 
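Illustrative aside, not part of the vendored diff: the new `baseCodec`/`getCodec` plumbing above keeps older `encoding.Codec` implementations working by wrapping them in the V1-to-V2 bridge at lookup time. A sketch of a classic V1 codec registration that this path would pick up; the JSON codec and its "json" name are examples, not anything shipped by the patch.

```
package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec is a classic (V1) encoding.Codec; the bridge in the hunk above
// adapts codecs like this one to the new CodecV2 path when they are looked up.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

func init() {
	// Makes the codec selectable per call via grpc.CallContentSubtype("json").
	encoding.RegisterCodec(jsonCodec{})
}
```

A caller would then opt into it per RPC with the `grpc.CallContentSubtype("json")` call option.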
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) +} + +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; @@ -41,9 +96,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7c..000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. 
$proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a578..934fac2b0 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 02738839d..0b42c302b 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( @@ -33,6 +39,9 @@ const ( OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. Canceled Code = 1 // Unknown error. An example of where this error may be returned is @@ -40,12 +49,17 @@ const ( // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. @@ -53,14 +67,21 @@ const ( // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. 
+ // + // The gRPC framework will generate this error code when the deadline is + // exceeded. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. + // + // This error code will not be generated by the gRPC framework. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. + // + // This error code will not be generated by the gRPC framework. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to @@ -69,10 +90,17 @@ const ( // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the @@ -94,6 +122,8 @@ const ( // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a @@ -102,6 +132,8 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. @@ -119,15 +151,26 @@ const ( // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. + // + // This error code will not be generated by the gRPC framework. OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. Internal Code = 13 // Unavailable indicates the service is currently unavailable. @@ -137,13 +180,22 @@ const ( // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. 
+ // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. Unauthenticated Code = 16 _maxCode = 17 @@ -183,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fbf..4a8992642 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,15 +18,14 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( - "context" - "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // State indicates the state of connectivity. // It can be the state of a ClientConn or SubConn. type State int @@ -44,8 +43,8 @@ func (s State) String() string { case Shutdown: return "SHUTDOWN" default: - grpclog.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" } } @@ -62,12 +61,34 @@ const ( Shutdown ) -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. - CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). - WaitForStateChange(context.Context, State) bool +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } } diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go new file mode 100644 index 000000000..afcdb8a0d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -0,0 +1,332 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package alts implements the ALTS credential support by gRPC library, which +// encapsulates all the state needed by a client to authenticate with a server +// using ALTS and make various assertions, e.g., about the client's identity, +// role, or whether it is authorized to make a particular call. +// This package is experimental. +package alts + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/handshaker" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/googlecloud" +) + +const ( + // hypervisorHandshakerServiceAddress represents the default ALTS gRPC + // handshaker service address in the hypervisor. + hypervisorHandshakerServiceAddress = "dns:///metadata.google.internal.:8080" + // defaultTimeout specifies the server handshake timeout. + defaultTimeout = 30.0 * time.Second + // The following constants specify the minimum and maximum acceptable + // protocol versions. + protocolVersionMaxMajor = 2 + protocolVersionMaxMinor = 1 + protocolVersionMinMajor = 2 + protocolVersionMinMinor = 1 +) + +var ( + vmOnGCP bool + once sync.Once + maxRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMaxMajor, + Minor: protocolVersionMaxMinor, + } + minRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMinMajor, + Minor: protocolVersionMinMinor, + } + // ErrUntrustedPlatform is returned from ClientHandshake and + // ServerHandshake is running on a platform where the trustworthiness of + // the handshaker service is not guaranteed. + ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") + logger = grpclog.Component("alts") +) + +// AuthInfo exposes security information from the ALTS handshake to the +// application. This interface is to be implemented by ALTS. Users should not +// need a brand new implementation of this interface. For situations like +// testing, any new implementation should embed this interface. This allows +// ALTS to add new methods to this interface. +type AuthInfo interface { + // ApplicationProtocol returns application protocol negotiated for the + // ALTS connection. + ApplicationProtocol() string + // RecordProtocol returns the record protocol negotiated for the ALTS + // connection. + RecordProtocol() string + // SecurityLevel returns the security level of the created ALTS secure + // channel. + SecurityLevel() altspb.SecurityLevel + // PeerServiceAccount returns the peer service account. + PeerServiceAccount() string + // LocalServiceAccount returns the local service account. + LocalServiceAccount() string + // PeerRPCVersions returns the RPC version supported by the peer. + PeerRPCVersions() *altspb.RpcProtocolVersions +} + +// ClientOptions contains the client-side options of an ALTS channel. 
These +// options will be passed to the underlying ALTS handshaker. +type ClientOptions struct { + // TargetServiceAccounts contains a list of expected target service + // accounts. + TargetServiceAccounts []string + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultClientOptions creates a new ClientOptions object with the default +// values. +func DefaultClientOptions() *ClientOptions { + return &ClientOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// ServerOptions contains the server-side options of an ALTS channel. These +// options will be passed to the underlying ALTS handshaker. +type ServerOptions struct { + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultServerOptions creates a new ServerOptions object with the default +// values. +func DefaultServerOptions() *ServerOptions { + return &ServerOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// altsTC is the credentials required for authenticating a connection using ALTS. +// It implements credentials.TransportCredentials interface. +type altsTC struct { + info *credentials.ProtocolInfo + side core.Side + accounts []string + hsAddress string +} + +// NewClientCreds constructs a client-side ALTS TransportCredentials object. +func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { + return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) +} + +// NewServerCreds constructs a server-side ALTS TransportCredentials object. +func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { + return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) +} + +func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { + once.Do(func() { + vmOnGCP = googlecloud.OnGCE() + }) + if hsAddress == "" { + hsAddress = hypervisorHandshakerServiceAddress + } + return &altsTC{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: "alts", + SecurityVersion: "1.0", + }, + side: side, + accounts: accounts, + hsAddress: hsAddress, + } +} + +// ClientHandshake implements the client side handshake protocol. +func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it is shared with other handshakes. + + // Possible context leak: + // The cancel function for the child context we create will only be + // called a non-nil error is returned. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + opts := handshaker.DefaultClientHandshakerOptions() + opts.TargetName = addr + opts.TargetServiceAccounts = g.accounts + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + chs.Close() + } + }() + secConn, authInfo, err := chs.ClientHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +// ServerHandshake implements the server side ALTS handshaker. +func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it's shared with other handshakes. + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + opts := handshaker.DefaultServerHandshakerOptions() + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + shs.Close() + } + }() + secConn, authInfo, err := shs.ServerHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +func (g *altsTC) Info() credentials.ProtocolInfo { + return *g.info +} + +func (g *altsTC) Clone() credentials.TransportCredentials { + info := *g.info + var accounts []string + if g.accounts != nil { + accounts = make([]string, len(g.accounts)) + copy(accounts, g.accounts) + } + return &altsTC{ + info: &info, + side: g.side, + hsAddress: g.hsAddress, + accounts: accounts, + } +} + +func (g *altsTC) OverrideServerName(serverNameOverride string) error { + g.info.ServerName = serverNameOverride + return nil +} + +// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. 
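Illustrative aside, not part of the vendored diff: the ALTS credentials added above are consumed like any other `credentials.TransportCredentials`. A minimal client-side sketch using the package's own `DefaultClientOptions`; the target is a placeholder, and as the `ErrUntrustedPlatform` check above enforces, this only works when running on GCP.

```
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

func main() {
	// DefaultClientOptions points at the hypervisor handshaker service;
	// TargetServiceAccounts could be set here to pin the server's identity.
	creds := alts.NewClientCreds(alts.DefaultClientOptions())

	conn, err := grpc.NewClient("greeter.example.internal:443", // placeholder target
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}
```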
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { + switch { + case v1.GetMajor() > v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): + return 1 + case v1.GetMajor() < v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): + return -1 + } + return 0 +} + +// checkRPCVersions performs a version check between local and peer rpc protocol +// versions. This function returns true if the check passes which means both +// parties agreed on a common rpc protocol to use, and false otherwise. The +// function also returns the highest common RPC protocol version both parties +// agreed on. +func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { + if local == nil || peer == nil { + logger.Error("invalid checkRPCVersions argument, either local or peer is nil.") + return false, nil + } + + // maxCommonVersion is MIN(local.max, peer.max). + maxCommonVersion := local.GetMaxRpcVersion() + if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { + maxCommonVersion = peer.GetMaxRpcVersion() + } + + // minCommonVersion is MAX(local.min, peer.min). + minCommonVersion := peer.GetMinRpcVersion() + if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { + minCommonVersion = local.GetMinRpcVersion() + } + + if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { + return false, nil + } + return true, maxCommonVersion +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go new file mode 100644 index 000000000..ebea57da1 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provide authentication information returned by handshakers. +package authinfo + +import ( + "google.golang.org/grpc/credentials" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" +) + +var _ credentials.AuthInfo = (*altsAuthInfo)(nil) + +// altsAuthInfo exposes security information from the ALTS handshake to the +// application. altsAuthInfo is immutable and implements credentials.AuthInfo. +type altsAuthInfo struct { + p *altspb.AltsContext + credentials.CommonAuthInfo +} + +// New returns a new altsAuthInfo object given handshaker results. +func New(result *altspb.HandshakerResult) credentials.AuthInfo { + return newAuthInfo(result) +} + +func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { + return &altsAuthInfo{ + p: &altspb.AltsContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + RecordProtocol: result.GetRecordProtocol(), + // TODO: assign security level from result. 
+ SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, + PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), + LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), + PeerRpcVersions: result.GetPeerRpcVersions(), + PeerAttributes: result.GetPeerIdentity().GetAttributes(), + }, + CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + } +} + +// AuthType identifies the context as providing ALTS authentication information. +func (s *altsAuthInfo) AuthType() string { + return "alts" +} + +// ApplicationProtocol returns the context's application protocol. +func (s *altsAuthInfo) ApplicationProtocol() string { + return s.p.GetApplicationProtocol() +} + +// RecordProtocol returns the context's record protocol. +func (s *altsAuthInfo) RecordProtocol() string { + return s.p.GetRecordProtocol() +} + +// SecurityLevel returns the context's security level. +func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { + return s.p.GetSecurityLevel() +} + +// PeerServiceAccount returns the context's peer service account. +func (s *altsAuthInfo) PeerServiceAccount() string { + return s.p.GetPeerServiceAccount() +} + +// LocalServiceAccount returns the context's local service account. +func (s *altsAuthInfo) LocalServiceAccount() string { + return s.p.GetLocalServiceAccount() +} + +// PeerRPCVersions returns the context's peer RPC versions. +func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { + return s.p.GetPeerRpcVersions() +} + +// PeerAttributes returns the context's peer attributes. +func (s *altsAuthInfo) PeerAttributes() map[string]string { + return s.p.GetPeerAttributes() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go new file mode 100644 index 000000000..3896e8cf2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains common core functionality for ALTS. +package internal + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +const ( + // ClientSide identifies the client in this communication. + ClientSide Side = iota + // ServerSide identifies the server in this communication. + ServerSide +) + +// PeerNotRespondingError is returned when a peer server is not responding +// after a channel has been established. It is treated as a temporary connection +// error and re-connection to the server should be attempted. +var PeerNotRespondingError = &peerNotRespondingError{} + +// Side identifies the party's role: client or server. +type Side int + +type peerNotRespondingError struct{} + +// Return an error message for the purpose of logging. +func (e *peerNotRespondingError) Error() string { + return "peer server is not responding and re-connection should be attempted." 
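On the server side, applications typically reach these getters through the public alts.AuthInfoFromContext helper rather than the internal altsAuthInfo type. A minimal sketch of a unary interceptor that logs the peer identity (service registration omitted):

```
package main

import (
	"context"
	"log"

	grpc "google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

// altsInfoInterceptor logs the ALTS peer identity for every unary RPC.
func altsInfoInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	if ai, err := alts.AuthInfoFromContext(ctx); err == nil {
		log.Printf("ALTS peer %q (record protocol %s) called %s",
			ai.PeerServiceAccount(), ai.RecordProtocol(), info.FullMethod)
	}
	return handler(ctx, req)
}

func main() {
	// Wire the interceptor into a server; register services before serving.
	srv := grpc.NewServer(grpc.UnaryInterceptor(altsInfoInterceptor))
	_ = srv
}
```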
+} + +// Temporary indicates if this connection error is temporary or fatal. +func (e *peerNotRespondingError) Temporary() bool { + return true +} + +// Handshaker defines a ALTS handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a client-side handshaking and + // returns a secure connection and corresponding auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a server-side handshaking and + // returns a secure connection and corresponding auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the caller + // obtains the secure connection. + Close() +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go new file mode 100644 index 000000000..7e4bfee88 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "fmt" + "strconv" +) + +// rekeyAEAD holds the necessary information for an AEAD based on +// AES-GCM that performs nonce-based key derivation and XORs the +// nonce with a random mask. +type rekeyAEAD struct { + kdfKey []byte + kdfCounter []byte + nonceMask []byte + nonceBuf []byte + gcmAEAD cipher.AEAD +} + +// KeySizeError signals that the given key does not have the correct size. +type KeySizeError int + +func (k KeySizeError) Error() string { + return "alts/conn: invalid key size " + strconv.Itoa(int(k)) +} + +// newRekeyAEAD creates a new instance of aes128gcm with rekeying. +// The key argument should be 44 bytes, the first 32 bytes are used as a key +// for HKDF-expand and the remaining 12 bytes are used as a random mask for +// the counter. +func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { + k := len(key) + if k != kdfKeyLen+nonceLen { + return nil, KeySizeError(k) + } + return &rekeyAEAD{ + kdfKey: key[:kdfKeyLen], + kdfCounter: make([]byte, kdfCounterLen), + nonceMask: key[kdfKeyLen:], + nonceBuf: make([]byte, nonceLen), + gcmAEAD: nil, + }, nil +} + +// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Seal for aes128gcm. +func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if err := s.rekeyIfRequired(nonce); err != nil { + panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) +} + +// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Open for aes128gcm. 
+func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if err := s.rekeyIfRequired(nonce); err != nil { + return nil, err + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) +} + +// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil +// or cannot be used with given nonce. +func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { + newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] + if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { + return nil + } + copy(s.kdfCounter, newKdfCounter) + a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) + if err != nil { + return err + } + s.gcmAEAD, err = cipher.NewGCM(a) + return err +} + +// maskNonce XORs the given nonce with the mask and stores the result in dst. +func maskNonce(dst, nonce, mask []byte) { + nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) + nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) + mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) + mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) + binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) + binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) +} + +// NonceSize returns the required nonce size. +func (s *rekeyAEAD) NonceSize() int { + return s.gcmAEAD.NonceSize() +} + +// Overhead returns the ciphertext overhead. +func (s *rekeyAEAD) Overhead() int { + return s.gcmAEAD.Overhead() +} + +// hkdfExpand computes the first 16 bytes of the HKDF-expand function +// defined in RFC5869. +func hkdfExpand(key, info []byte) []byte { + mac := hmac.New(sha256.New, key) + mac.Write(info) + mac.Write([]byte{0x01}[:]) + return mac.Sum(nil)[:aeadKeyLen] +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go new file mode 100644 index 000000000..04e0adb6c --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/aes" + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCM = 5 +) + +// aes128gcm is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcm struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + aead cipher.AEAD +} + +// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. 
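hkdfExpand above is a single-block HKDF-Expand (RFC 5869) truncated to the AES-128 key size: the derived key is the first 16 bytes of HMAC-SHA256(kdfKey, counter || 0x01), and a new key is derived whenever nonce bytes [2:8] change. A standalone sketch of the same derivation (all-zero inputs for illustration only):

```
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deriveAESKey computes the first 16 bytes of HKDF-Expand with a one-byte
// block counter of 0x01, matching the rekeying scheme described above.
func deriveAESKey(kdfKey, counter []byte) []byte {
	mac := hmac.New(sha256.New, kdfKey)
	mac.Write(counter)
	mac.Write([]byte{0x01})
	return mac.Sum(nil)[:16]
}

func main() {
	kdfKey := make([]byte, 32) // first 32 bytes of the 44-byte record key
	counter := make([]byte, 6) // nonce bytes [2:8] select the derived key
	fmt.Println(hex.EncodeToString(deriveAESKey(kdfKey, counter)))
}
```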
+func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.aead.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcm) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go new file mode 100644 index 000000000..b5bbb5497 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCMRekey = 8 + nonceLen = 12 + aeadKeyLen = 16 + kdfKeyLen = 32 + kdfCounterOffset = 2 + kdfCounterLen = 6 + sizeUint64 = 8 +) + +// aes128gcmRekey is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. 
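The non-rekeying record protocol is plain AES-128-GCM keyed once per connection, with the 96-bit send/receive counter used directly as the nonce. A standalone roundtrip sketch using only the standard library (all-zero key for illustration only):

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16) // AES-128 key; all zero for illustration only
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, 12) // 96-bit little-endian counter, starting at 0
	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	// Reuse ct's storage for the plaintext, as Decrypt does with ciphertext[:0].
	pt, err := aead.Open(ct[:0], nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s, overhead=%d bytes\n", pt, aead.Overhead())
}
```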
+type aes128gcmRekey struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + inAEAD cipher.AEAD + outAEAD cipher.AEAD +} + +// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying +// for ALTS record. The key argument should be 44 bytes, the first 32 bytes +// are used as a key for HKDF-expand and the remaining 12 bytes are used +// as a random mask for the counter. +func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { + inCounter := NewInCounter(side, overflowLenAES128GCMRekey) + outCounter := NewOutCounter(side, overflowLenAES128GCMRekey) + inAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + outAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + return &aes128gcmRekey{ + inCounter, + outCounter, + inAEAD, + outAEAD, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcmRekey) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go new file mode 100644 index 000000000..1795d0c9e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package conn + +import ( + "encoding/binary" + "errors" + "fmt" +) + +const ( + // GcmTagSize is the GCM tag size is the difference in length between + // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto + // library. + GcmTagSize = 16 +) + +// ErrAuth occurs on authentication failure. +var ErrAuth = errors.New("message authentication failed") + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// ParseFramedMsg parse the provided buffer and returns a frame of the format +// msgLength+msg and any remaining bytes in that buffer. +func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { + // If the size field is not complete, return the provided buffer as + // remaining buffer. + if len(b) < MsgLenFieldSize { + return nil, b, nil + } + msgLenField := b[:MsgLenFieldSize] + length := binary.LittleEndian.Uint32(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) + } + if len(b) < int(length)+4 { // account for the first 4 msg length bytes. + // Frame is not complete yet. + return nil, b, nil + } + return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go new file mode 100644 index 000000000..9f00aca0b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "errors" +) + +const counterLen = 12 + +var ( + errInvalidCounter = errors.New("invalid counter") +) + +// Counter is a 96-bit, little-endian counter. +type Counter struct { + value [counterLen]byte + invalid bool + overflowLen int +} + +// Value returns the current value of the counter as a byte slice. +func (c *Counter) Value() ([]byte, error) { + if c.invalid { + return nil, errInvalidCounter + } + return c.value[:], nil +} + +// Inc increments the counter and checks for overflow. +func (c *Counter) Inc() { + // If the counter is already invalid, there is no need to increase it. 
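SliceForAppend grows (or reuses) a buffer and hands back a tail slice aliasing the newly appended region, which is how the record layer reserves room for ciphertext and tag without extra copies. A standalone sketch of the pattern from a caller's point of view:

```
package main

import "fmt"

// sliceForAppend mirrors the helper above: head is the extended buffer and
// tail aliases only the n freshly added bytes.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	return head, head[len(in):]
}

func main() {
	buf := make([]byte, 4, 64)
	head, tail := sliceForAppend(buf, 16)
	fmt.Println(len(head), len(tail), cap(head) == cap(buf)) // 20 16 true
}
```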
+ if c.invalid { + return + } + i := 0 + for ; i < c.overflowLen; i++ { + c.value[i]++ + if c.value[i] != 0 { + break + } + } + if i == c.overflowLen { + c.invalid = true + } +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go new file mode 100644 index 000000000..f1ea7bb20 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -0,0 +1,268 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package conn contains an implementation of a secure channel created by gRPC +// handshakers. +package conn + +import ( + "encoding/binary" + "fmt" + "math" + "net" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. +type ALTSRecordCrypto interface { + // Encrypt encrypts the plaintext and computes the tag (if any) of dst + // and plaintext. dst and plaintext may fully overlap or not at all. + Encrypt(dst, plaintext []byte) ([]byte, error) + // EncryptionOverhead returns the tag size (if any) in bytes. + EncryptionOverhead() int + // Decrypt decrypts ciphertext and verify the tag (if any). dst and + // ciphertext may alias exactly or not at all. To reuse ciphertext's + // storage for the decrypted output, use ciphertext[:0] as dst. + Decrypt(dst, ciphertext []byte) ([]byte, error) +} + +// ALTSRecordFunc is a function type for factory functions that create +// ALTSRecordCrypto instances. +type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) + +const ( + // MsgLenFieldSize is the byte size of the frame length field of a + // framed message. + MsgLenFieldSize = 4 + // The byte size of the message type field of a framed message. + msgTypeFieldSize = 4 + // The bytes size limit for a ALTS record message. + altsRecordLengthLimit = 1024 * 1024 // 1 MiB + // The default bytes size of a ALTS record message. + altsRecordDefaultLength = 4 * 1024 // 4KiB + // Message type value included in ALTS record framing. + altsRecordMsgType = uint32(0x06) + // The initial write buffer size. + altsWriteBufferInitialSize = 32 * 1024 // 32KiB + // The maximum write buffer size. This *must* be multiple of + // altsRecordDefaultLength. + altsWriteBufferMaxSize = 512 * 1024 // 512KiB +) + +var ( + protocols = make(map[string]ALTSRecordFunc) +) + +// RegisterProtocol register a ALTS record encryption protocol. +func RegisterProtocol(protocol string, f ALTSRecordFunc) error { + if _, ok := protocols[protocol]; ok { + return fmt.Errorf("protocol %v is already registered", protocol) + } + protocols[protocol] = f + return nil +} + +// conn represents a secured connection. It implements the net.Conn interface. +type conn struct { + net.Conn + crypto ALTSRecordCrypto + // buf holds data that has been read from the connection and decrypted, + // but has not yet been returned by Read. 
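The counter is little-endian, so Inc propagates the carry from byte 0 upward and marks the counter invalid once the carry runs past overflowLen bytes, at which point no further frames may be protected. A standalone sketch of the same carry logic:

```
package main

import "fmt"

// inc increments a little-endian counter in place and reports whether the
// carry overflowed past the first overflowLen bytes.
func inc(value []byte, overflowLen int) (invalid bool) {
	i := 0
	for ; i < overflowLen; i++ {
		value[i]++
		if value[i] != 0 {
			break
		}
	}
	return i == overflowLen
}

func main() {
	v := make([]byte, 12)
	v[0] = 0xff
	fmt.Println(inc(v, 5), v[:2]) // false [0 1]
}
```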
+ buf []byte + payloadLengthLimit int + // protected holds data read from the network but have not yet been + // decrypted. This data might not compose a complete frame. + protected []byte + // writeBuf is a buffer used to contain encrypted frames before being + // written to the network. + writeBuf []byte + // nextFrame stores the next frame (in protected buffer) info. + nextFrame []byte + // overhead is the calculated overhead of each frame. + overhead int +} + +// NewConn creates a new secure channel instance given the other party role and +// handshaking result. +func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { + newCrypto := protocols[recordProtocol] + if newCrypto == nil { + return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) + } + crypto, err := newCrypto(side, key) + if err != nil { + return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) + } + overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() + payloadLengthLimit := altsRecordDefaultLength - overhead + var protectedBuf []byte + if protected == nil { + // We pre-allocate protected to be of size + // 2*altsRecordDefaultLength-1 during initialization. We only + // read from the network into protected when protected does not + // contain a complete frame, which is at most + // altsRecordDefaultLength-1 (bytes). And we read at most + // altsRecordDefaultLength (bytes) data into protected at one + // time. Therefore, 2*altsRecordDefaultLength-1 is large enough + // to buffer data read from the network. + protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1) + } else { + protectedBuf = make([]byte, len(protected)) + copy(protectedBuf, protected) + } + + altsConn := &conn{ + Conn: c, + crypto: crypto, + payloadLengthLimit: payloadLengthLimit, + protected: protectedBuf, + writeBuf: make([]byte, altsWriteBufferInitialSize), + nextFrame: protectedBuf, + overhead: overhead, + } + return altsConn, nil +} + +// Read reads and decrypts a frame from the underlying connection, and copies the +// decrypted payload into b. If the size of the payload is greater than len(b), +// Read retains the remaining bytes in an internal buffer, and subsequent calls +// to Read will read from this buffer until it is exhausted. +func (p *conn) Read(b []byte) (n int, err error) { + if len(p.buf) == 0 { + var framedMsg []byte + framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) + if err != nil { + return n, err + } + // Check whether the next frame to be decrypted has been + // completely received yet. + if len(framedMsg) == 0 { + copy(p.protected, p.nextFrame) + p.protected = p.protected[:len(p.nextFrame)] + // Always copy next incomplete frame to the beginning of + // the protected buffer and reset nextFrame to it. + p.nextFrame = p.protected + } + // Check whether a complete frame has been received yet. + for len(framedMsg) == 0 { + if len(p.protected) == cap(p.protected) { + tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) + copy(tmp, p.protected) + p.protected = tmp + } + n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) + if err != nil { + return 0, err + } + p.protected = p.protected[:len(p.protected)+n] + framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) + if err != nil { + return 0, err + } + } + // Now we have a complete frame, decrypted it. 
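A quick worked example of the bookkeeping NewConn performs: with the default 4 KiB frame, the 4-byte length field, the 4-byte type field, and AES-GCM's 16-byte tag add up to 24 bytes of overhead, leaving 4072 plaintext bytes per frame.

```
package main

import "fmt"

func main() {
	const (
		msgLenFieldSize  = 4
		msgTypeFieldSize = 4
		gcmTagSize       = 16
		frameSize        = 4 * 1024
	)
	overhead := msgLenFieldSize + msgTypeFieldSize + gcmTagSize
	fmt.Println(overhead, frameSize-overhead) // 24 4072
}
```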
+ msg := framedMsg[MsgLenFieldSize:] + msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) + if msgType&0xff != altsRecordMsgType { + return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", + msgType, altsRecordMsgType) + } + ciphertext := msg[msgTypeFieldSize:] + + // Decrypt requires that if the dst and ciphertext alias, they + // must alias exactly. Code here used to use msg[:0], but msg + // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than + // ciphertext, so they alias inexactly. Using ciphertext[:0] + // arranges the appropriate aliasing without needing to copy + // ciphertext or use a separate destination buffer. For more info + // check: https://golang.org/pkg/crypto/cipher/#AEAD. + p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) + if err != nil { + return 0, err + } + } + + n = copy(b, p.buf) + p.buf = p.buf[n:] + return n, nil +} + +// Write encrypts, frames, and writes bytes from b to the underlying connection. +func (p *conn) Write(b []byte) (n int, err error) { + n = len(b) + // Calculate the output buffer size with framing and encryption overhead. + numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) + size := len(b) + numOfFrames*p.overhead + // If writeBuf is too small, increase its size up to the maximum size. + partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. + writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 000000000..84821fa25 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. 
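Write lays each frame out as a 4-byte little-endian length (counting everything after the length field itself), a 4-byte message type (0x06), and then the ciphertext plus tag. A standalone sketch of just the 8-byte header:

```
package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader builds the length and type fields for a frame whose encrypted
// body (payload plus tag) is payloadAndTagLen bytes long.
func frameHeader(payloadAndTagLen int) []byte {
	hdr := make([]byte, 8)
	binary.LittleEndian.PutUint32(hdr[0:4], uint32(4+payloadAndTagLen)) // length excludes the length field
	binary.LittleEndian.PutUint32(hdr[4:8], 0x06)                       // altsRecordMsgType
	return hdr
}

func main() {
	fmt.Printf("% x\n", frameHeader(5+16)) // 5-byte payload + 16-byte GCM tag
}
```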
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import core "google.golang.org/grpc/credentials/alts/internal" + +// NewOutCounter returns an outgoing counter initialized to the starting sequence +// number for the client/server side of a connection. +func NewOutCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ServerSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// NewInCounter returns an incoming counter initialized to the starting sequence +// number for the client/server side of a connection. This is used in ALTS record +// to check that incoming counters are as expected, since ALTS record guarantees +// that messages are unwrapped in the same order that the peer wrapped them. +func NewInCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ClientSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// CounterFromValue creates a new counter given an initial value. +func CounterFromValue(value []byte, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + copy(c.value[:], value) + return +} + +// CounterSide returns the connection side (client/server) a sequence counter is +// associated with. +func CounterSide(c []byte) core.Side { + if c[counterLen-1]&0x80 == 0x80 { + return core.ServerSide + } + return core.ClientSide +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go new file mode 100644 index 000000000..50721f690 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -0,0 +1,373 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker provides ALTS handshaking functionality for GCP. 
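Because the two directions must never reuse a nonce, the server-to-client counter starts with the high bit of its most significant byte set, which is also how CounterSide recovers the sender from a raw counter value. A standalone sketch:

```
package main

import "fmt"

// counterSide reports which side produced a counter, based on the high bit of
// its last (most significant) byte.
func counterSide(c []byte) string {
	if c[len(c)-1]&0x80 == 0x80 {
		return "server"
	}
	return "client"
}

func main() {
	client := make([]byte, 12)
	server := make([]byte, 12)
	server[11] = 0x80
	fmt.Println(counterSide(client), counterSide(server)) // client server
}
```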
+package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "time" + + "golang.org/x/sync/semaphore" + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/authinfo" + "google.golang.org/grpc/credentials/alts/internal/conn" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" +) + +const ( + // The maximum byte size of receive frames. + frameLimit = 64 * 1024 // 64 KB + rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" +) + +var ( + hsProtocol = altspb.HandshakeProtocol_ALTS + appProtocols = []string{"grpc"} + recordProtocols = []string{rekeyRecordProtocolName} + keyLength = map[string]int{ + rekeyRecordProtocolName: 44, + } + altsRecordFuncs = map[string]conn.ALTSRecordFunc{ + // ALTS handshaker protocols. + rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { + return conn.NewAES128GCMRekey(s, keyData) + }, + } + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + // errOutOfBound occurs when the handshake service returns a consumed + // bytes value larger than the buffer that was passed to it originally. + errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound") +) + +func init() { + for protocol, f := range altsRecordFuncs { + if err := conn.RegisterProtocol(protocol, f); err != nil { + panic(err) + } + } +} + +// ClientHandshakerOptions contains the client handshaker options that can +// provided by the caller. +type ClientHandshakerOptions struct { + // ClientIdentity is the handshaker client local identity. + ClientIdentity *altspb.Identity + // TargetName is the server service account name for secure name + // checking. + TargetName string + // TargetServiceAccounts contains a list of expected target service + // accounts. One of these accounts should match one of the accounts in + // the handshaker results. Otherwise, the handshake fails. + TargetServiceAccounts []string + // RPCVersions specifies the gRPC versions accepted by the client. + RPCVersions *altspb.RpcProtocolVersions +} + +// ServerHandshakerOptions contains the server handshaker options that can +// provided by the caller. +type ServerHandshakerOptions struct { + // RPCVersions specifies the gRPC versions accepted by the server. + RPCVersions *altspb.RpcProtocolVersions +} + +// DefaultClientHandshakerOptions returns the default client handshaker options. +func DefaultClientHandshakerOptions() *ClientHandshakerOptions { + return &ClientHandshakerOptions{} +} + +// DefaultServerHandshakerOptions returns the default client handshaker options. +func DefaultServerHandshakerOptions() *ServerHandshakerOptions { + return &ServerHandshakerOptions{} +} + +// altsHandshaker is used to complete an ALTS handshake between client and +// server. This handshaker talks to the ALTS handshaker service in the metadata +// server. +type altsHandshaker struct { + // RPC stream used to access the ALTS Handshaker service. + stream altsgrpc.HandshakerService_DoHandshakeClient + // the connection to the peer. 
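Concurrent handshakes are bounded with a weighted semaphore from golang.org/x/sync. A standalone sketch of the acquire/release pattern (the limit of 2 is arbitrary here):

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // allow at most 2 in-flight handshakes

	for i := 0; i < 3; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire:", err)
			return
		}
		fmt.Println("handshake slot acquired for", i)
		sem.Release(1) // normally deferred until the handshake finishes
	}
}
```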
+ conn net.Conn + // a virtual connection to the ALTS handshaker service. + clientConn *grpc.ClientConn + // client handshake options. + clientOpts *ClientHandshakerOptions + // server handshake options. + serverOpts *ServerHandshakerOptions + // defines the side doing the handshake, client or server. + side core.Side +} + +// NewClientHandshaker creates a core.Handshaker that performs a client-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker +// service in the metadata server. +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { + return &altsHandshaker{ + stream: nil, + conn: c, + clientConn: conn, + clientOpts: opts, + side: core.ClientSide, + }, nil +} + +// NewServerHandshaker creates a core.Handshaker that performs a server-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker +// service in the metadata server. +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { + return &altsHandshaker{ + stream: nil, + conn: c, + clientConn: conn, + serverOpts: opts, + side: core.ServerSide, + }, nil +} + +// ClientHandshake starts and completes a client ALTS handshake for GCP. Once +// done, ClientHandshake returns a secure connection. +func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if err := clientHandshakes.Acquire(ctx, 1); err != nil { + return nil, nil, err + } + defer clientHandshakes.Release(1) + + if h.side != core.ClientSide { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") + } + + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + + // Create target identities from service account list. + targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) + for _, account := range h.clientOpts.TargetServiceAccounts { + targetIdentities = append(targetIdentities, &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: account, + }, + }) + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ClientStart{ + ClientStart: &altspb.StartClientHandshakeReq{ + HandshakeSecurityProtocol: hsProtocol, + ApplicationProtocols: appProtocols, + RecordProtocols: recordProtocols, + TargetIdentities: targetIdentities, + LocalIdentity: h.clientOpts.ClientIdentity, + TargetName: h.clientOpts.TargetName, + RpcVersions: h.clientOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +// ServerHandshake starts and completes a server ALTS handshake for GCP. Once +// done, ServerHandshake returns a secure connection. 
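For completeness, the server-side path is exercised when an application installs ALTS server credentials via the public alts package. A minimal sketch (the listen address is a placeholder, and the handshaker service must be reachable for handshakes to succeed):

```
package main

import (
	"log"
	"net"

	grpc "google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

func main() {
	lis, err := net.Listen("tcp", ":10443")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	creds := alts.NewServerCreds(alts.DefaultServerOptions())
	s := grpc.NewServer(grpc.Creds(creds))
	// Register services here, then serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```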
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if err := serverHandshakes.Acquire(ctx, 1); err != nil { + return nil, nil, err + } + defer serverHandshakes.Release(1) + + if h.side != core.ServerSide { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") + } + + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + + // Prepare server parameters. + params := make(map[int32]*altspb.ServerHandshakeParameters) + params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ + RecordProtocols: recordProtocols, + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ServerStart{ + ServerStart: &altspb.StartServerHandshakeReq{ + ApplicationProtocols: appProtocols, + HandshakeParameters: params, + InBytes: p[:n], + RpcVersions: h.serverOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check of the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errOutOfBound + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + // The handshaker returns a 128 bytes key. It should be truncated based + // on the returned record protocol. + keyLen, ok := keyLength[result.RecordProtocol] + if !ok { + return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) + } + sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) + if err != nil { + return nil, nil, err + } + return sc, result, nil +} + +func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone processes the handshake until the handshaker service returns +// the results. Handshaker service takes care of frame parsing, so we read +// whatever received from the network and send it to the handshaker service. 
+func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { + var lastWriteTime time.Time + for { + if len(resp.OutFrames) > 0 { + lastWriteTime = time.Now() + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, extra, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service, and + // nothing is received from the peer, then we are stuck. + // This covers the case when the peer is not responding. Note + // that handshaker service connection issues are caught in + // accessHandshakerService before we even get here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, core.PeerNotRespondingError + } + // Append extra bytes from the previous interaction with the + // handshaker service with the current buffer read from conn. + p := append(extra, buf[:n]...) + // Compute the time elapsed since the last write to the peer. + timeElapsed := time.Since(lastWriteTime) + timeElapsedMs := uint32(timeElapsed.Milliseconds()) + // From here on, p and extra point to the same slice. + resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_Next{ + Next: &altspb.NextHandshakeMessageReq{ + InBytes: p, + NetworkLatencyMs: timeElapsedMs, + }, + }, + }) + if err != nil { + return nil, nil, err + } + // Set extra based on handshaker service response. + if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errOutOfBound + } + extra = p[resp.GetBytesConsumed():] + } +} + +// Close terminates the Handshaker. It should be called when the caller obtains +// the secure connection. +func (h *altsHandshaker) Close() { + if h.stream != nil { + h.stream.CloseSend() + } +} + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. +func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go new file mode 100644 index 000000000..b3af03590 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service manages connections between the VM application and the ALTS +// handshaker service. +package service + +import ( + "sync" + + grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +var ( + // mu guards hsConnMap and hsDialer. 
+ mu sync.Mutex + // hsConn represents a mapping from a hypervisor handshaker service address + // to a corresponding connection to a hypervisor handshaker service + // instance. + hsConnMap = make(map[string]*grpc.ClientConn) +) + +// Dial dials the handshake service in the hypervisor. If a connection has +// already been established, this function returns it. Otherwise, a new +// connection is created. +func Dial(hsAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + hsConn, ok := hsConnMap[hsAddress] + if !ok { + // Create a new connection to the handshaker service. Note that + // this connection stays open until the application is closed. + var err error + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + hsConnMap[hsAddress] = hsConn + } + return hsConn, nil +} + +// CloseForTesting closes all open connections to the handshaker service. +// +// For testing purposes only. +func CloseForTesting() error { + for _, hsConn := range hsConnMap { + if hsConn == nil { + continue + } + if err := hsConn.Close(); err != nil { + return err + } + } + + // Reset the connection map. + hsConnMap = make(map[string]*grpc.ClientConn) + return nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go new file mode 100644 index 000000000..b7de8f05b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -0,0 +1,259 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/altscontext.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/gcp/altscontext.proto + +package grpc_gcp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AltsContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. 
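Dial keeps a single shared ClientConn per handshaker-service address. Shown standalone, the caching pattern is just a mutex-guarded map plus a lazy grpc.Dial (the address below is a placeholder):

```
package main

import (
	"sync"

	grpc "google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

var (
	mu    sync.Mutex
	conns = make(map[string]*grpc.ClientConn)
)

// dialCached returns the cached connection for addr, creating it on first use.
func dialCached(addr string) (*grpc.ClientConn, error) {
	mu.Lock()
	defer mu.Unlock()
	if cc, ok := conns[addr]; ok {
		return cc, nil
	}
	cc, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	conns[addr] = cc
	return cc, nil
}

func main() {
	cc, err := dialCached("localhost:8080") // placeholder address
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
```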
+ RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // The security level of the created secure channel. + SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` + // The peer service account. + PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` + // The local service account. + LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. + PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *AltsContext) Reset() { + *x = AltsContext{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AltsContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AltsContext) ProtoMessage() {} + +func (x *AltsContext) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AltsContext.ProtoReflect.Descriptor instead. 
+func (*AltsContext) Descriptor() ([]byte, []int) { + return file_grpc_gcp_altscontext_proto_rawDescGZIP(), []int{0} +} + +func (x *AltsContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *AltsContext) GetRecordProtocol() string { + if x != nil { + return x.RecordProtocol + } + return "" +} + +func (x *AltsContext) GetSecurityLevel() SecurityLevel { + if x != nil { + return x.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (x *AltsContext) GetPeerServiceAccount() string { + if x != nil { + return x.PeerServiceAccount + } + return "" +} + +func (x *AltsContext) GetLocalServiceAccount() string { + if x != nil { + return x.LocalServiceAccount + } + return "" +} + +func (x *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.PeerRpcVersions + } + return nil +} + +func (x *AltsContext) GetPeerAttributes() map[string]string { + if x != nil { + return x.PeerAttributes + } + return nil +} + +var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor + +var file_grpc_gcp_altscontext_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf1, 0x03, 0x0a, 0x0b, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0e, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x73, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x30, 0x0a, 0x14, + 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x65, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32, + 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 
0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, + 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a, + 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, + 0x70, 0x2e, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x50, 0x65, + 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x6c, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x10, 0x41, + 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, + 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once + file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc +) + +func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { + file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() { + file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData) + }) + return file_grpc_gcp_altscontext_proto_rawDescData +} + +var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_gcp_altscontext_proto_goTypes = []any{ + (*AltsContext)(nil), // 0: grpc.gcp.AltsContext + nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry + (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel + (*RpcProtocolVersions)(nil), // 3: grpc.gcp.RpcProtocolVersions +} +var file_grpc_gcp_altscontext_proto_depIdxs = []int32{ + 2, // 0: grpc.gcp.AltsContext.security_level:type_name -> grpc.gcp.SecurityLevel + 3, // 1: grpc.gcp.AltsContext.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 1, // 2: grpc.gcp.AltsContext.peer_attributes:type_name -> grpc.gcp.AltsContext.PeerAttributesEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_altscontext_proto_init() } +func file_grpc_gcp_altscontext_proto_init() { + if File_grpc_gcp_altscontext_proto != nil { + return + } + file_grpc_gcp_transport_security_common_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AltsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_gcp_altscontext_proto_goTypes, + DependencyIndexes: file_grpc_gcp_altscontext_proto_depIdxs, + MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes, + }.Build() + File_grpc_gcp_altscontext_proto = out.File + file_grpc_gcp_altscontext_proto_rawDesc = nil + file_grpc_gcp_altscontext_proto_goTypes = nil + file_grpc_gcp_altscontext_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 000000000..79b5dad47 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1467 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. + HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +// Enum value maps for HandshakeProtocol. 
+var ( + HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", + } + HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, + } +) + +func (x HandshakeProtocol) Enum() *HandshakeProtocol { + p := new(HandshakeProtocol) + *p = x + return p +} + +func (x HandshakeProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HandshakeProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_handshaker_proto_enumTypes[0].Descriptor() +} + +func (HandshakeProtocol) Type() protoreflect.EnumType { + return &file_grpc_gcp_handshaker_proto_enumTypes[0] +} + +func (x HandshakeProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HandshakeProtocol.Descriptor instead. +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +// Enum value maps for NetworkProtocol. +var ( + NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", + } + NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, + } +) + +func (x NetworkProtocol) Enum() *NetworkProtocol { + p := new(NetworkProtocol) + *p = x + return p +} + +func (x NetworkProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NetworkProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_handshaker_proto_enumTypes[1].Descriptor() +} + +func (NetworkProtocol) Type() protoreflect.EnumType { + return &file_grpc_gcp_handshaker_proto_enumTypes[1] +} + +func (x NetworkProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NetworkProtocol.Descriptor instead. +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} +} + +type Endpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
+ Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` +} + +func (x *Endpoint) Reset() { + *x = Endpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Endpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Endpoint) ProtoMessage() {} + +func (x *Endpoint) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Endpoint.ProtoReflect.Descriptor instead. +func (*Endpoint) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{0} +} + +func (x *Endpoint) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *Endpoint) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *Endpoint) GetProtocol() NetworkProtocol { + if x != nil { + return x.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{1} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetServiceAccount() string { + if x, ok := x.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + // Service account of a connection endpoint. 
+ ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + // Hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +type StartClientHandshakeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. + TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, the + // handshaker chooses a default local identity. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // (Optional) Local endpoint information of the connection to the server, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote server, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) If target name is provided, a secure naming check is performed + // to verify that the peer authenticated identity is indeed authorized to run + // the target name. + TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + // (Optional) RPC protocol versions supported by the client. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the client. + MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` + // (Optional) An access token created by the caller only intended for use in + // ALTS connections. The access token that should be used to authenticate to + // the peer. The access token MUST be strongly bound to the ALTS credentials + // used to establish the connection that the token is sent over. 
+ AccessToken string `protobuf:"bytes,11,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"` +} + +func (x *StartClientHandshakeReq) Reset() { + *x = StartClientHandshakeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartClientHandshakeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartClientHandshakeReq) ProtoMessage() {} + +func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartClientHandshakeReq.ProtoReflect.Descriptor instead. +func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{2} +} + +func (x *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { + if x != nil { + return x.HandshakeSecurityProtocol + } + return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +} + +func (x *StartClientHandshakeReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *StartClientHandshakeReq) GetRecordProtocols() []string { + if x != nil { + return x.RecordProtocols + } + return nil +} + +func (x *StartClientHandshakeReq) GetTargetIdentities() []*Identity { + if x != nil { + return x.TargetIdentities + } + return nil +} + +func (x *StartClientHandshakeReq) GetLocalIdentity() *Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { + if x != nil { + return x.LocalEndpoint + } + return nil +} + +func (x *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { + if x != nil { + return x.RemoteEndpoint + } + return nil +} + +func (x *StartClientHandshakeReq) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +func (x *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.RpcVersions + } + return nil +} + +func (x *StartClientHandshakeReq) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +func (x *StartClientHandshakeReq) GetAccessToken() string { + if x != nil { + return x.AccessToken + } + return "" +} + +type ServerHandshakeParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The record protocols supported by the server, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, the handshaker chooses a default local identity. + LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + // A token created by the caller only intended for use in + // ALTS connections. The token should be used to authenticate to + // the peer. The token MUST be strongly bound to the ALTS credentials + // used to establish the connection that the token is sent over. 
+ Token *string `protobuf:"bytes,3,opt,name=token,proto3,oneof" json:"token,omitempty"` +} + +func (x *ServerHandshakeParameters) Reset() { + *x = ServerHandshakeParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHandshakeParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHandshakeParameters) ProtoMessage() {} + +func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHandshakeParameters.ProtoReflect.Descriptor instead. +func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{3} +} + +func (x *ServerHandshakeParameters) GetRecordProtocols() []string { + if x != nil { + return x.RecordProtocols + } + return nil +} + +func (x *ServerHandshakeParameters) GetLocalIdentities() []*Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +func (x *ServerHandshakeParameters) GetToken() string { + if x != nil && x.Token != nil { + return *x.Token + } + return "" +} + +type StartServerHandshakeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the server, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // Handshake parameters (record protocols and local identities supported by + // the server) mapped by the handshake protocol. Each handshake security + // protocol (e.g., TLS or ALTS) has its own set of record protocols and local + // identities. Since protobuf does not support enum as key to the map, the key + // to handshake_parameters is the integer value of HandshakeProtocol enum. + HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple HandshakeReq messages. + InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // (Optional) Local endpoint information of the connection to the client, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote client, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) RPC protocol versions supported by the server. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the server. 
+ MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` +} + +func (x *StartServerHandshakeReq) Reset() { + *x = StartServerHandshakeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartServerHandshakeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartServerHandshakeReq) ProtoMessage() {} + +func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartServerHandshakeReq.ProtoReflect.Descriptor instead. +func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{4} +} + +func (x *StartServerHandshakeReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { + if x != nil { + return x.HandshakeParameters + } + return nil +} + +func (x *StartServerHandshakeReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { + if x != nil { + return x.LocalEndpoint + } + return nil +} + +func (x *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { + if x != nil { + return x.RemoteEndpoint + } + return nil +} + +func (x *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.RpcVersions + } + return nil +} + +func (x *StartServerHandshakeReq) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +type NextHandshakeMessageReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple NextHandshakerMessageReq + // messages. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // Number of milliseconds between when the application send the last handshake + // message to the peer and when the application received the current handshake + // message (in the in_bytes field) from the peer. 
+ NetworkLatencyMs uint32 `protobuf:"varint,2,opt,name=network_latency_ms,json=networkLatencyMs,proto3" json:"network_latency_ms,omitempty"` +} + +func (x *NextHandshakeMessageReq) Reset() { + *x = NextHandshakeMessageReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NextHandshakeMessageReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NextHandshakeMessageReq) ProtoMessage() {} + +func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NextHandshakeMessageReq.ProtoReflect.Descriptor instead. +func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{5} +} + +func (x *NextHandshakeMessageReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *NextHandshakeMessageReq) GetNetworkLatencyMs() uint32 { + if x != nil { + return x.NetworkLatencyMs + } + return 0 +} + +type HandshakerReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *HandshakerReq_ClientStart + // *HandshakerReq_ServerStart + // *HandshakerReq_Next + ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *HandshakerReq) Reset() { + *x = HandshakerReq{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerReq) ProtoMessage() {} + +func (x *HandshakerReq) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerReq.ProtoReflect.Descriptor instead. +func (*HandshakerReq) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{6} +} + +func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *HandshakerReq) GetClientStart() *StartClientHandshakeReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *HandshakerReq) GetServerStart() *StartServerHandshakeReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *HandshakerReq) GetNext() *NextHandshakeMessageReq { + if x, ok := x.GetReqOneof().(*HandshakerReq_Next); ok { + return x.Next + } + return nil +} + +type isHandshakerReq_ReqOneof interface { + isHandshakerReq_ReqOneof() +} + +type HandshakerReq_ClientStart struct { + // The start client handshake request message. 
+ ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type HandshakerReq_ServerStart struct { + // The start server handshake request message. + ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type HandshakerReq_Next struct { + // The next handshake request message. + Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} + +type HandshakerResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // Cryptographic key data. The key data may be more than the key length + // required for the record protocol, thus the client of the handshaker + // service needs to truncate the key data into the right key length. + KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used in the handshake. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // Indicate whether the handshaker service client should keep the channel + // between the handshaker service open, e.g., in order to handle + // post-handshake messages in the future. + KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // The maximum frame size of the peer. + MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` +} + +func (x *HandshakerResult) Reset() { + *x = HandshakerResult{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerResult) ProtoMessage() {} + +func (x *HandshakerResult) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerResult.ProtoReflect.Descriptor instead. 
+func (*HandshakerResult) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{7} +} + +func (x *HandshakerResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *HandshakerResult) GetRecordProtocol() string { + if x != nil { + return x.RecordProtocol + } + return "" +} + +func (x *HandshakerResult) GetKeyData() []byte { + if x != nil { + return x.KeyData + } + return nil +} + +func (x *HandshakerResult) GetPeerIdentity() *Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *HandshakerResult) GetLocalIdentity() *Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *HandshakerResult) GetKeepChannelOpen() bool { + if x != nil { + return x.KeepChannelOpen + } + return false +} + +func (x *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { + if x != nil { + return x.PeerRpcVersions + } + return nil +} + +func (x *HandshakerResult) GetMaxFrameSize() uint32 { + if x != nil { + return x.MaxFrameSize + } + return 0 +} + +type HandshakerStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code. This could be the gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *HandshakerStatus) Reset() { + *x = HandshakerStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerStatus) ProtoMessage() {} + +func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerStatus.ProtoReflect.Descriptor instead. +func (*HandshakerStatus) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{8} +} + +func (x *HandshakerStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *HandshakerStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type HandshakerResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Frames to be given to the peer for the NextHandshakeMessageReq. May be + // empty if no out_frames have to be sent to the peer or if in_bytes in the + // HandshakerReq are incomplete. All the non-empty out frames must be sent to + // the peer even if the handshaker status is not OK as these frames may + // contain the alert frames. + OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes consumed by the handshaker. It is possible + // that part of in_bytes in HandshakerReq was unrelated to the handshake + // process. + BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set iff the handshake was successful. 
out_frames may still be set + // to frames that needs to be forwarded to the peer. + Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + // Status of the handshaker. + Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *HandshakerResp) Reset() { + *x = HandshakerResp{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandshakerResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandshakerResp) ProtoMessage() {} + +func (x *HandshakerResp) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandshakerResp.ProtoReflect.Descriptor instead. +func (*HandshakerResp) Descriptor() ([]byte, []int) { + return file_grpc_gcp_handshaker_proto_rawDescGZIP(), []int{9} +} + +func (x *HandshakerResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *HandshakerResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *HandshakerResp) GetResult() *HandshakerResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *HandshakerResp) GetStatus() *HandshakerStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_grpc_gcp_handshaker_proto protoreflect.FileDescriptor + +var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x74, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x35, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xe8, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x29, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, + 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, + 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, + 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x19, + 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x29, + 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x11, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 
0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x40, + 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 
0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 
0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 
0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_handshaker_proto_rawDescOnce sync.Once + file_grpc_gcp_handshaker_proto_rawDescData = file_grpc_gcp_handshaker_proto_rawDesc +) + +func 
file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { + file_grpc_gcp_handshaker_proto_rawDescOnce.Do(func() { + file_grpc_gcp_handshaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_handshaker_proto_rawDescData) + }) + return file_grpc_gcp_handshaker_proto_rawDescData +} + +var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_grpc_gcp_handshaker_proto_goTypes = []any{ + (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol + (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol + (*Endpoint)(nil), // 2: grpc.gcp.Endpoint + (*Identity)(nil), // 3: grpc.gcp.Identity + (*StartClientHandshakeReq)(nil), // 4: grpc.gcp.StartClientHandshakeReq + (*ServerHandshakeParameters)(nil), // 5: grpc.gcp.ServerHandshakeParameters + (*StartServerHandshakeReq)(nil), // 6: grpc.gcp.StartServerHandshakeReq + (*NextHandshakeMessageReq)(nil), // 7: grpc.gcp.NextHandshakeMessageReq + (*HandshakerReq)(nil), // 8: grpc.gcp.HandshakerReq + (*HandshakerResult)(nil), // 9: grpc.gcp.HandshakerResult + (*HandshakerStatus)(nil), // 10: grpc.gcp.HandshakerStatus + (*HandshakerResp)(nil), // 11: grpc.gcp.HandshakerResp + nil, // 12: grpc.gcp.Identity.AttributesEntry + nil, // 13: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry + (*RpcProtocolVersions)(nil), // 14: grpc.gcp.RpcProtocolVersions +} +var file_grpc_gcp_handshaker_proto_depIdxs = []int32{ + 1, // 0: grpc.gcp.Endpoint.protocol:type_name -> grpc.gcp.NetworkProtocol + 12, // 1: grpc.gcp.Identity.attributes:type_name -> grpc.gcp.Identity.AttributesEntry + 0, // 2: grpc.gcp.StartClientHandshakeReq.handshake_security_protocol:type_name -> grpc.gcp.HandshakeProtocol + 3, // 3: grpc.gcp.StartClientHandshakeReq.target_identities:type_name -> grpc.gcp.Identity + 3, // 4: grpc.gcp.StartClientHandshakeReq.local_identity:type_name -> grpc.gcp.Identity + 2, // 5: grpc.gcp.StartClientHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint + 2, // 6: grpc.gcp.StartClientHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint + 14, // 7: grpc.gcp.StartClientHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 3, // 8: grpc.gcp.ServerHandshakeParameters.local_identities:type_name -> grpc.gcp.Identity + 13, // 9: grpc.gcp.StartServerHandshakeReq.handshake_parameters:type_name -> grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry + 2, // 10: grpc.gcp.StartServerHandshakeReq.local_endpoint:type_name -> grpc.gcp.Endpoint + 2, // 11: grpc.gcp.StartServerHandshakeReq.remote_endpoint:type_name -> grpc.gcp.Endpoint + 14, // 12: grpc.gcp.StartServerHandshakeReq.rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 4, // 13: grpc.gcp.HandshakerReq.client_start:type_name -> grpc.gcp.StartClientHandshakeReq + 6, // 14: grpc.gcp.HandshakerReq.server_start:type_name -> grpc.gcp.StartServerHandshakeReq + 7, // 15: grpc.gcp.HandshakerReq.next:type_name -> grpc.gcp.NextHandshakeMessageReq + 3, // 16: grpc.gcp.HandshakerResult.peer_identity:type_name -> grpc.gcp.Identity + 3, // 17: grpc.gcp.HandshakerResult.local_identity:type_name -> grpc.gcp.Identity + 14, // 18: grpc.gcp.HandshakerResult.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions + 9, // 19: grpc.gcp.HandshakerResp.result:type_name -> grpc.gcp.HandshakerResult + 10, // 20: grpc.gcp.HandshakerResp.status:type_name -> grpc.gcp.HandshakerStatus + 5, // 21: grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry.value:type_name -> 
grpc.gcp.ServerHandshakeParameters + 8, // 22: grpc.gcp.HandshakerService.DoHandshake:input_type -> grpc.gcp.HandshakerReq + 11, // 23: grpc.gcp.HandshakerService.DoHandshake:output_type -> grpc.gcp.HandshakerResp + 23, // [23:24] is the sub-list for method output_type + 22, // [22:23] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_handshaker_proto_init() } +func file_grpc_gcp_handshaker_proto_init() { + if File_grpc_gcp_handshaker_proto != nil { + return + } + file_grpc_gcp_transport_security_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*StartClientHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ServerHandshakeParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*StartServerHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*NextHandshakeMessageReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ + 
(*HandshakerReq_ClientStart)(nil), + (*HandshakerReq_ServerStart)(nil), + (*HandshakerReq_Next)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_handshaker_proto_rawDesc, + NumEnums: 2, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_gcp_handshaker_proto_goTypes, + DependencyIndexes: file_grpc_gcp_handshaker_proto_depIdxs, + EnumInfos: file_grpc_gcp_handshaker_proto_enumTypes, + MessageInfos: file_grpc_gcp_handshaker_proto_msgTypes, + }.Build() + File_grpc_gcp_handshaker_proto = out.File + file_grpc_gcp_handshaker_proto_rawDesc = nil + file_grpc_gcp_handshaker_proto_goTypes = nil + file_grpc_gcp_handshaker_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go new file mode 100644 index 000000000..34443b1d2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -0,0 +1,144 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" +) + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. 
+ DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) +} + +type handshakerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HandshakerReq, HandshakerResp], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[HandshakerReq, HandshakerResp]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerReq, HandshakerResp] + +// HandshakerServiceServer is the server API for HandshakerService service. +// All implementations must embed UnimplementedHandshakerServiceServer +// for forward compatibility. +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error + mustEmbedUnimplementedHandshakerServiceServer() +} + +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} + +func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { + return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") +} +func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} + +// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HandshakerServiceServer will +// result in compilation errors. +type UnsafeHandshakerServiceServer interface { + mustEmbedUnimplementedHandshakerServiceServer() +} + +func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&HandshakerService_ServiceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&grpc.GenericServerStream[HandshakerReq, HandshakerResp]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HandshakerService_DoHandshakeServer = grpc.BidiStreamingServer[HandshakerReq, HandshakerResp] + +// HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HandshakerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go new file mode 100644 index 000000000..6956c14f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -0,0 +1,321 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/transport_security_common.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/gcp/transport_security_common.proto + +package grpc_gcp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The security level of the created channel. The list is sorted in increasing +// level of security. This order must always be maintained. +type SecurityLevel int32 + +const ( + SecurityLevel_SECURITY_NONE SecurityLevel = 0 + SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 + SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 +) + +// Enum value maps for SecurityLevel. 
+var ( + SecurityLevel_name = map[int32]string{ + 0: "SECURITY_NONE", + 1: "INTEGRITY_ONLY", + 2: "INTEGRITY_AND_PRIVACY", + } + SecurityLevel_value = map[string]int32{ + "SECURITY_NONE": 0, + "INTEGRITY_ONLY": 1, + "INTEGRITY_AND_PRIVACY": 2, + } +) + +func (x SecurityLevel) Enum() *SecurityLevel { + p := new(SecurityLevel) + *p = x + return p +} + +func (x SecurityLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SecurityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_gcp_transport_security_common_proto_enumTypes[0].Descriptor() +} + +func (SecurityLevel) Type() protoreflect.EnumType { + return &file_grpc_gcp_transport_security_common_proto_enumTypes[0] +} + +func (x SecurityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SecurityLevel.Descriptor instead. +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} +} + +// Max and min supported RPC protocol versions. +type RpcProtocolVersions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maximum supported RPC version. + MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` + // Minimum supported RPC version. + MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` +} + +func (x *RpcProtocolVersions) Reset() { + *x = RpcProtocolVersions{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcProtocolVersions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcProtocolVersions) ProtoMessage() {} + +func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcProtocolVersions.ProtoReflect.Descriptor instead. +func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0} +} + +func (x *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { + if x != nil { + return x.MaxRpcVersion + } + return nil +} + +func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { + if x != nil { + return x.MinRpcVersion + } + return nil +} + +// RPC version contains a major version and a minor version. 
+type RpcProtocolVersions_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *RpcProtocolVersions_Version) Reset() { + *x = RpcProtocolVersions_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcProtocolVersions_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcProtocolVersions_Version) ProtoMessage() {} + +func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcProtocolVersions_Version.ProtoReflect.Descriptor instead. +func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { + return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *RpcProtocolVersions_Version) GetMajor() uint32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *RpcProtocolVersions_Version) GetMinor() uint32 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor + +var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, + 0x78, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x6d, + 0x69, 0x6e, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, + 0x72, 0x2a, 0x51, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 
0x79, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, + 0x45, 0x47, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x49, 0x56, 0x41, + 0x43, 0x59, 0x10, 0x02, 0x42, 0x78, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x1c, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once + file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc +) + +func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { + file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() { + file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData) + }) + return file_grpc_gcp_transport_security_common_proto_rawDescData +} + +var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ + (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel + (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions + (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version +} +var file_grpc_gcp_transport_security_common_proto_depIdxs = []int32{ + 2, // 0: grpc.gcp.RpcProtocolVersions.max_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version + 2, // 1: grpc.gcp.RpcProtocolVersions.min_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_grpc_gcp_transport_security_common_proto_init() } +func file_grpc_gcp_transport_security_common_proto_init() { + if File_grpc_gcp_transport_security_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RpcProtocolVersions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*RpcProtocolVersions_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_gcp_transport_security_common_proto_goTypes, + DependencyIndexes: file_grpc_gcp_transport_security_common_proto_depIdxs, + EnumInfos: file_grpc_gcp_transport_security_common_proto_enumTypes, + MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes, + }.Build() + File_grpc_gcp_transport_security_common_proto = out.File + file_grpc_gcp_transport_security_common_proto_rawDesc = nil + file_grpc_gcp_transport_security_common_proto_goTypes = nil + file_grpc_gcp_transport_security_common_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go new file mode 100644 index 000000000..cbfd056cf --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/utils.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package alts + +import ( + "context" + "errors" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, +// if it exists. This API should be used by gRPC server RPC handlers to get +// information about the communicating peer. For client-side, use grpc.Peer() +// CallOption. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} + +// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it +// exists. This API should be used by gRPC clients after obtaining a peer object +// using the grpc.Peer() CallOption. +func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + altsAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no alts.AuthInfo found in Peer") + } + return altsAuthInfo, nil +} + +// ClientAuthorizationCheck checks whether the client is authorized to access +// the requested resources based on the given expected client service accounts. +// This API should be used by gRPC server RPC handlers. This API should not be +// used by clients. 
+func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { + authInfo, err := AuthInfoFromContext(ctx) + if err != nil { + return status.Errorf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err) + } + peer := authInfo.PeerServiceAccount() + for _, sa := range expectedServiceAccounts { + if strings.EqualFold(peer, sa) { + return nil + } + } + return status.Errorf(codes.PermissionDenied, "Client %v is not authorized", peer) +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 8ea3d4a1d..665e790bb 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -24,36 +24,78 @@ package credentials // import "google.golang.org/grpc/credentials" import ( "context" - "crypto/tls" - "crypto/x509" "errors" "fmt" - "io/ioutil" "net" - "strings" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/credentials/internal" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. RequireTransportSecurity() bool } +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. 
+func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { @@ -61,13 +103,19 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. type AuthInfo interface { AuthType() string } @@ -79,20 +127,30 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. 
+ // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about - // the connection. + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) @@ -100,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -116,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // @@ -125,145 +197,74 @@ type Bundle interface { NewWithMode(mode string) (Bundle, error) } -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo } -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. 
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], - } - // Currently there's no way to get LocalCertificate info from tls package. - if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes } -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi } -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo } -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(authority, ":") - if colonPos == -1 { - colonPos = len(authority) - } - cfg.ServerName = authority[:colonPos] + if ai == nil { + return errors.New("AuthInfo is nil") } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. 
+ if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) Clone() TransportCredentials { - return NewTLS(c.config) -} - -func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { - c.config.ServerName = serverNameOverride - return nil -} - -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) } } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) - return tc -} - -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil } // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. +// +// This API is experimental. 
type ChannelzSecurityInfo interface { GetSecurityValue() ChannelzSecurityValue } @@ -271,66 +272,20 @@ type ChannelzSecurityInfo interface { // ChannelzSecurityValue defines the interface that GetSecurityValue() return value // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue // and *OtherChannelzSecurityValue. +// +// This API is experimental. type ChannelzSecurityValue interface { isChannelzSecurityValue() } -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -type TLSChannelzSecurityValue struct { - ChannelzSecurityValue - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} - // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // from GetSecurityValue(), which contains protocol specific security info. Note // the Value field will be sent to users of channelz requesting channel info, and // thus sensitive info should better be avoided. +// +// This API is experimental. type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string Value proto.Message } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. 
-func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go new file mode 100644 index 000000000..fbdf7dc29 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -0,0 +1,145 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package google defines credentials for google cloud services. +package google + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/alts" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const tokenRequestTimeout = 30 * time.Second + +var logger = grpclog.Component("credentials") + +// DefaultCredentialsOptions constructs options to build DefaultCredentials. +type DefaultCredentialsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. + PerRPCCreds credentials.PerRPCCredentials +} + +// NewDefaultCredentialsWithOptions returns a credentials bundle that is +// configured to work with google services. +// +// This API is experimental. +func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { + if opts.PerRPCCreds == nil { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = newADC(ctx) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) + } + } + c := &creds{opts: opts} + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) + } + return bundle +} + +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) +} + +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. +// +// This API is experimental. +func NewComputeEngineCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ + PerRPCCreds: oauth.NewComputeEngine(), + }) +} + +// creds implements credentials.Bundle. +type creds struct { + opts DefaultCredentialsOptions + + // Supported modes are defined in internal/internal.go. + mode string + // The active transport credentials associated with this bundle. + transportCreds credentials.TransportCredentials + // The active per RPC credentials associated with this bundle. 
+ perRPCCreds credentials.PerRPCCredentials +} + +func (c *creds) TransportCredentials() credentials.TransportCredentials { + return c.transportCreds +} + +func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { + if c == nil { + return nil + } + return c.perRPCCreds +} + +var ( + newTLS = func() credentials.TransportCredentials { + return credentials.NewTLS(nil) + } + newALTS = func() credentials.TransportCredentials { + return alts.NewClientCreds(alts.DefaultClientOptions()) + } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } +) + +// NewWithMode should make a copy of Bundle, and switch mode. Modifying the +// existing Bundle may cause races. +func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { + newCreds := &creds{ + opts: c.opts, + mode: mode, + } + + // Create transport credentials. + switch mode { + case internal.CredsBundleModeFallback: + newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS()) + case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: + // Only the clients can use google default credentials, so we only need + // to create new ALTS client creds here. + newCreds.transportCreds = newALTS() + default: + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + + if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { + newCreds.perRPCCreds = newCreds.opts.PerRPCCreds + } + + return newCreds, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go new file mode 100644 index 000000000..cccb22271 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -0,0 +1,128 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package google + +import ( + "context" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/xds" +) + +const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" + +// clusterTransportCreds is a combo of TLS + ALTS. +// +// On the client, ClientHandshake picks TLS or ALTS based on address attributes. +// - if attributes has cluster name +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS +// - otherwise, use ALTS +// +// - else, do TLS +// +// On the server, ServerHandshake always does TLS. 
+type clusterTransportCreds struct { + tls credentials.TransportCredentials + alts credentials.TransportCredentials +} + +func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds { + return &clusterTransportCreds{ + tls: tls, + alts: alts, + } +} + +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { + chi := credentials.ClientHandshakeInfoFromContext(ctx) + if chi.Attributes == nil { + return "" + } + cluster, _ := xds.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false + } + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) + } + return c.tls.ClientHandshake(ctx, authority, rawConn) +} + +func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return c.tls.ServerHandshake(conn) +} + +func (c *clusterTransportCreds) Info() credentials.ProtocolInfo { + // TODO: this always returns tls.Info now, because we don't have a cluster + // name to check when this method is called. This method doesn't affect + // anything important now. We may want to revisit this if it becomes more + // important later. + return c.tls.Info() +} + +func (c *clusterTransportCreds) Clone() credentials.TransportCredentials { + return &clusterTransportCreds{ + tls: c.tls.Clone(), + alts: c.alts.Clone(), + } +} + +func (c *clusterTransportCreds) OverrideServerName(s string) error { + if err := c.tls.OverrideServerName(s); err != nil { + return err + } + return c.alts.OverrideServerName(s) +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..4c805c644 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..328b838ed --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "context" + "fmt" + "net/url" + "os" + "sync" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +// removeServiceNameFromJWTURI removes RPC service name from URI. +func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } + // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with + // uri as the key, to avoid recreating for every RPC. + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +// +// Deprecated: use oauth.TokenSource instead. 
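The per-RPC credential types above all wrap an oauth2 token source and insist on a transport that provides PrivacyAndIntegrity. A minimal wiring sketch, assuming a hypothetical endpoint and a static token (none of this is part of the vendored code):

```
package main

import (
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Static token purely for illustration; real code would use a refreshing source.
	ts := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})}

	// TokenSource.RequireTransportSecurity() is true, so TLS transport
	// credentials must be supplied alongside it; a nil CertPool means system roots.
	creds := credentials.NewClientTLSFromCert(nil, "")

	conn, err := grpc.NewClient("dns:///api.example.com:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithPerRPCCredentials(ts),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
	_ = conn // generated stubs would be created from conn here
}
```

Pairing these per-RPC credentials with insecure transport credentials is rejected, as noted in the insecure package docs above.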
+func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. +type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + creds, err := google.FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + + // If JSON is nil, the authentication is provided by the environment and not + // with a credentials file, e.g. when code is running on Google Cloud + // Platform. Use the returned token source. + if creds.JSON == nil { + return TokenSource{creds.TokenSource}, nil + } + + // If auth is provided by env variable or creds file, the behavior will be + // different based on whether scope is set. 
Because the returned + // creds.TokenSource does oauth with jwt by default, and it requires scope. + // We can only use it if scope is not empty, otherwise it will fail with + // missing scope error. + // + // If scope is set, use it, it should just work. + // + // If scope is not set, we try to use jwt directly without oauth (this only + // works if it's a service account). + + if len(scope) != 0 { + return TokenSource{creds.TokenSource}, nil + } + + // Try to convert JSON to a jwt config without setting the optional scope + // parameter to check if it's a service account (the function errors if it's + // not). This is necessary because the returned config doesn't show the type + // of the account. + if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { + // If this fails, it's not a service account, return the original + // TokenSource from above. + return TokenSource{creds.TokenSource}, nil + } + + // If it's a service account, create a JWT only access with the key. + return NewJWTAccessFromKey(creds.JSON) +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 000000000..411435854 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,283 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "os" + + "google.golang.org/grpc/grpclog" + credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" +) + +var logger = grpclog.Component("credentials") + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup(t.State.CipherSuite), + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. 
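On the server side, the TLSInfo populated by these handshakes can be read back from RPC handlers via the peer package. A small sketch, with illustrative names only:

```
// Package tlsinspect is a sketch of pulling TLSInfo out of a handler context.
package tlsinspect

import (
	"context"
	"log"

	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

func logPeerTLS(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return // no peer info; not called from an RPC handler
	}
	// For TLS-authenticated connections, AuthInfo is a credentials.TLSInfo
	// whose AuthType() is "tls".
	tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return
	}
	log.Printf("negotiated %q with cipher suite %#x",
		tlsInfo.State.NegotiatedProtocol, tlsInfo.State.CipherSuite)
	if tlsInfo.SPIFFEID != nil {
		log.Printf("peer SPIFFE ID: %s", tlsInfo.SPIFFEID)
	}
}
```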
+type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. 
TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } + tlsInfo := TLSInfo{ + State: cs, + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. 
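NewTLS and the helper constructors above are the usual entry points for client-side TLS. A sketch of an mTLS client, assuming hypothetical certificate file names:

```
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical file names; load the CA bundle and the client key pair.
	caPEM, err := os.ReadFile("ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("failed to parse CA certificate")
	}
	clientCert, err := tls.LoadX509KeyPair("client.pem", "client-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	// NewTLS clones the config, appends "h2" to NextProtos, and raises
	// MinVersion to TLS 1.2 when unset, per the code above.
	creds := credentials.NewTLS(&tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{clientCert},
	})

	conn, err := grpc.NewClient("dns:///svc.internal:8443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```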
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e8f34d0d6..2b285beee 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,50 @@ package grpc import ( "context" - "fmt" "net" + "net/url" "time" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithBufferPool = withBufferPool +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
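The server-side constructors above (NewServerTLSFromCert / NewServerTLSFromFile) plug into a server through the Creds server option. A minimal sketch with hypothetical file paths:

```
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical certificate and key paths.
	creds, err := credentials.NewServerTLSFromFile("server.pem", "server-key.pem")
	if err != nil {
		log.Fatalf("loading TLS credentials: %v", err)
	}

	lis, err := net.Listen("tcp", ":8443")
	if err != nil {
		log.Fatal(err)
	}

	s := grpc.NewServer(grpc.Creds(creds))
	// Register services on s here before serving.
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```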
type dialOptions struct { @@ -45,22 +73,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs backoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParent channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -68,6 +91,10 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration + defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -75,14 +102,42 @@ type DialOption interface { apply(*dialOptions) } +var globalDialOptions []DialOption + +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -99,13 +154,40 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. +// write on the wire. The default value for this buffer is 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -115,8 +197,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -194,60 +277,27 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. Will be removed in a future 1.x release. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout } }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. 
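WithConnectParams, added above, supersedes the backoff-related dial options that are now marked deprecated. A sketch of overriding only MaxDelay on top of backoff.DefaultConfig, against a hypothetical local target:

```
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from backoff.DefaultConfig and override only MaxDelay, as the
	// WithConnectParams documentation suggests.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```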
-func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithBackoffMaxDelay configures the dialer to use the provided maximum delay // when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffMaxDelay(md time.Duration) DialOption { return WithBackoffConfig(BackoffConfig{MaxDelay: md}) } @@ -255,39 +305,80 @@ func WithBackoffMaxDelay(md time.Duration) DialOption { // WithBackoffConfig configures the dialer to use the provided backoff // parameters after connection failures. // -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffConfig(b BackoffConfig) DialOption { - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) } // withBackoff sets the backoff strategy used for connectRetryNum after a failed // connection attempt. // // This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { +func withBackoff(bs internalbackoff.Strategy) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = bs }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true }) } +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. 
+func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false }) } @@ -312,7 +403,10 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// This API is experimental. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithCredentialsBundle(b credentials.Bundle) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.CredsBundle = b @@ -322,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext and context.WithTimeout instead. Will be -// supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -334,6 +428,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. 
+// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -341,7 +446,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } func init() { - internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -366,7 +470,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -378,7 +496,12 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// This is an EXPERIMENTAL API. +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a +// later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.FailOnNonTempDialError = f @@ -389,15 +512,17 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { - grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { @@ -433,7 +558,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } // WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, +// interceptor for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. @@ -444,8 +569,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. 
This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -455,9 +579,14 @@ func WithAuthority(a string) DialOption { // WithChannelzParentID returns a DialOption that specifies the channelz ID of // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). -func WithChannelzParentID(id int64) DialOption { +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(c channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id + o.channelzParent = c }) } @@ -476,11 +605,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// This API is EXPERIMENTAL. +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -491,30 +625,37 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// This API is EXPERIMENTAL. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
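WithDefaultServiceConfig takes the JSON form of a service config and is only consulted when the resolver supplies none (or an invalid one). A sketch with a hypothetical target and a round_robin default:

```
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Fallback service config, used only when the resolver provides none.
	const defaultSC = `{"loadBalancingConfig": [{"round_robin":{}}]}`

	conn, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(defaultSC),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```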
func WithDisableHealthCheck() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableHealthCheck = true @@ -533,16 +674,22 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, + UseProxy: true, + UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // @@ -552,3 +699,69 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { o.minConnectTimeout = f }) } + +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. 
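WithIdleTimeout and WithMaxCallAttempts compose like any other DialOption; a brief sketch with a hypothetical local target:

```
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Enter idle mode after 10 minutes without RPCs (0 disables idleness).
		grpc.WithIdleTimeout(10*time.Minute),
		// Cap retry/hedging attempts per call; values < 2 fall back to the default of 5.
		grpc.WithMaxCallAttempts(3),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```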
+func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + +func withBufferPool(bufferPool mem.BufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.BufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 187adbb11..e7b532b6f 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,6 +16,8 @@ * */ +//go:generate ./scripts/regenerate.sh + /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 30a75da99..11d0ae142 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,12 +19,17 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// This package is EXPERIMENTAL. +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -33,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -61,6 +70,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -73,16 +85,16 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -96,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. 
If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { @@ -114,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 000000000..074c5e234 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. 
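Codec (and its V2 counterpart below) is the extension point for alternative wire formats. A sketch of a hypothetical JSON codec registered against the v1 interface; the codec name and package are illustrative:

```
// Package jsoncodec sketches registering a custom Codec at init time.
package jsoncodec

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/encoding"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error) {
	return json.Marshal(v)
}

func (jsonCodec) Unmarshal(data []byte, v any) error {
	if err := json.Unmarshal(data, v); err != nil {
		return fmt.Errorf("json codec: %w", err)
	}
	return nil
}

// Name becomes the content-subtype, i.e. "application/grpc+json".
func (jsonCodec) Name() string { return "json" }

func init() {
	// RegisterCodec is not thread-safe and must run during initialization.
	encoding.RegisterCodec(jsonCodec{})
}
```

Callers can then select the codec per call with grpc.CallContentSubtype("json").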
+func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66b97a6f6..ceec319dd 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,90 +21,76 @@ package proto import ( - "math" - "sync" + "fmt" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) // Name is the name registered for the proto compressor. const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { + vv := messageV2Of(v) + if vv == nil { + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return uint32(val) -} -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil + + return data, nil } -func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { + vv := messageV2Of(v) + if vv == nil { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) 
+ defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v } - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err + return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 000000000..1d827dd5d --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. 
+ Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. 
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. 
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 000000000..3221f7a63 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. 
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..f1ae080dc --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) 
+ WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 51bb9457c..db320105e 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,88 +18,91 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog + +import ( + "os" -import "os" + "google.golang.org/grpc/grpclog/internal" +) -var logger = newLoggerV2() +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. -func Info(args ...interface{}) { - logger.Info(args...) +func Info(args ...any) { + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) 
+func Infof(format string, args ...any) { + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { - logger.Infoln(args...) +func Infoln(args ...any) { + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { - logger.Warning(args...) +func Warning(args ...any) { + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) +func Warningf(format string, args ...any) { + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { - logger.Warningln(args...) +func Warningln(args ...any) { + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { - logger.Error(args...) +func Error(args ...any) { + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) +func Errorf(format string, args ...any) { + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { - logger.Errorln(args...) +func Errorln(args ...any) { + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { - logger.Fatal(args...) +func Fatal(args ...any) { + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calles os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...any) { + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) +// It calls os.Exit() with exit code 1. +func Fatalln(args ...any) { + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -107,20 +110,77 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { - logger.Info(args...) +func Print(args ...any) { + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) +func Printf(format string, args ...any) { + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { - logger.Infoln(args...) +func Println(args ...any) { + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 000000000..59c03bc14 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 000000000..e524fdd40 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go new file mode 100644 index 000000000..07df71e98 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. 
Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. 
+ b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f71..4b2035857 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,68 +18,17 @@ package grpclog +import "google.golang.org/grpc/grpclog/internal" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...interface{}) { - g.Logger.Println(args...) 
-} - -func (g *loggerWrapper) Infof(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d49325776..892dc13d1 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -20,77 +20,24 @@ package grpclog import ( "io" - "io/ioutil" - "log" "os" "strconv" + "strings" + + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { - logger = l -} - -const ( - // infoLog indicates Info severity. 
- infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -99,27 +46,21 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -136,60 +77,21 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) -} - -func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) -} - -func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) -} - -func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) -} -func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) -} - -func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) -} - -func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) -} - -func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) -} - -func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) -} - -func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) -} - -func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
-} + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") -func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, + }) } -func (g *loggerT) V(l int) bool { - return l <= g.v -} +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 7c7bcada5..000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 8b7350022..877d78fc3 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,41 +23,68 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. 
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -74,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. 
And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 1bd0cce5a..b15cf482d 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,46 +23,43 @@ package backoff import ( + "context" + "errors" + "math/rand" "time" - "google.golang.org/grpc/internal/grpcrand" + grpcbackoff "google.golang.org/grpc/backoff" ) // Strategy defines the methodology for backing off after a grpc connection // failure. -// type Strategy interface { // Backoff returns the amount of time to wait before the next retry given // the number of consecutive failures. Backoff(retries int) time.Duration } -const ( - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay = 1.0 * time.Second - // factor is applied to the backoff after each retry. - factor = 1.6 - // jitter provides a range to randomize backoff delays. - jitter = 0.2 -) +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} // Exponential implements exponential backoff algorithm as defined in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. type Exponential struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config } // Backoff returns the amount of time to wait before the next retry given the // number of retries. func (bc Exponential) Backoff(retries int) time.Duration { if retries == 0 { - return baseDelay + return bc.Config.BaseDelay } - backoff, max := float64(baseDelay), float64(bc.MaxDelay) + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) for backoff < max && retries > 0 { - backoff *= factor + backoff *= bc.Config.Multiplier retries-- } if backoff > max { @@ -70,9 +67,43 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 000000000..13821a926 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. + return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..73bb4c4ee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,419 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return nil, errBalancerClosed + } + bw := &balancerWrapper{ + builder: builder, + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return nil, balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return bw, nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + + if balToUpdate == nil { + return errBalancerClosed + } + + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. 
+func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + gsb.mu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/shutdown +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. 
+type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + builder balancer.Builder + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + sc.Shutdown() + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. 
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + sc.Shutdown() + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d966..94a08d687 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index fee6aecd0..755fdebc1 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -25,38 +25,51 @@ import ( "os" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment varialbe or flags). +// built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -65,17 +78,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -85,83 +110,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { - return fmt.Errorf("conflicting rules for service %v found", service) +// New MethodLogger with same service overrides the old one. 
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) +func (l *logger) GetMethodLogger(methodName string) MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index 4cc2525df..f9e80e27a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -24,26 +24,24 @@ import ( "regexp" "strconv" "strings" - - "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. It can be used // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified -// later overrides the privous config. +// later overrides the previous config. func NewLoggerFromConfigString(s string) Logger { if s == "" { return nil @@ -52,14 +50,14 @@ func NewLoggerFromConfigString(s string) Logger { methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) + grpclogLogger.Warningf("failed to parse binary log config: %v", err) return nil } } return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
@@ -74,7 +72,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid config: %q, %v", config, err) } if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") } if suffix != "" { return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") @@ -91,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -106,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 160f6e861..966932891 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,17 +19,18 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" "time" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) type callIDGenerator struct { @@ -49,48 +50,67 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
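The filter grammar documented above composes per-method, per-service, and global rules, with "-" entries suppressing a method; GetMethodLogger resolves them most-specific-first (method rule, then blacklist, then service rule, then the global rule). A small sketch of that precedence, assuming a caller inside the grpc module since the package is internal:

```
import "google.golang.org/grpc/internal/binarylog"

func inspectFilters() {
	l := binarylog.NewLoggerFromConfigString("Foo/*,-Foo/Bar,Foo/Baz{m:256}")
	if l == nil {
		return // the config string failed to parse
	}
	_ = l.GetMethodLogger("/Foo/Baz") // method rule wins: messages truncated to 256 bytes
	_ = l.GetMethodLogger("/Foo/Qux") // falls back to the Foo/* service rule
	_ = l.GetMethodLogger("/Foo/Bar") // nil: suppressed by -Foo/Bar
}
```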
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: defaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) + timestamp := timestamppb.Now() m.Timestamp = timestamp m.CallId = ml.callID m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -109,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -120,7 +140,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -132,8 +152,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -147,27 +170,27 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, } if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + clientHeader.Timeout = durationpb.New(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -183,19 +206,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -208,10 +231,10 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -219,26 +242,26 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -248,10 +271,10 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -259,26 +282,26 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -288,15 +311,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -312,10 +335,10 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") + grpclogLogger.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte @@ -325,13 +348,13 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -340,9 +363,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -355,15 +378,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) 
toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -374,21 +397,21 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -398,26 +421,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 20d044f0f..9ea598b14 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -21,53 +21,44 @@ package binarylog import ( "bufio" "encoding/binary" - "fmt" "io" - "io/ioutil" "sync" "time" - "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/protobuf/proto" ) var ( - defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). ) -// SetDefaultSink sets the sink where binary logs will be written to. -// -// Not thread safe. Only set during initialization. -func SetDefaultSink(s Sink) { - if defaultSink != nil { - defaultSink.Close() - } - defaultSink = s -} - // Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. type Sink interface { // Write will be called to write the log entry into the sink. 
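The exported TruncatingMethodLogger is what per-call binary logging is built on: it stamps the timestamp and sequence id, truncates headers and messages to the configured limits, and hands the entry to the sink (the package-level DefaultSink, a no-op unless replaced). A hedged sketch with placeholder method name and authority, usable only from inside the grpc module:

```
import (
	"context"

	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

func logHeaderExample(ctx context.Context) {
	ml := binarylog.NewTruncatingMethodLogger(64, 1024) // 64-byte header cap, 1 KiB message cap
	ml.Log(ctx, &binarylog.ClientHeader{
		OnClientSide: true,
		Header:       metadata.Pairs("x-example", "value"),
		MethodName:   "/echo.Echo/UnaryEcho", // placeholder values
		Authority:    "localhost:50051",
	})
}
```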
// // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // -// Write() marshalls the proto message and writes it to the given writer. Each +// Write() marshals the proto message and writes it to the given writer. Each // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. -func newWriterSink(w io.Writer) *writerSink { +func newWriterSink(w io.Writer) Sink { return &writerSink{out: w} } @@ -75,10 +66,11 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -93,25 +85,28 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } -type bufWriteCloserSink struct { - mu sync.Mutex - closer io.Closer - out *writerSink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool - writeStartOnce sync.Once - writeTicker *time.Ticker + writeTicker *time.Ticker + done chan struct{} } -func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -119,44 +114,57 @@ const ( bufFlushDuration = 60 * time.Second ) -func (fs *bufWriteCloserSink) startFlushGoroutine() { +func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() - fs.buf.Flush() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } fs.mu.Unlock() } }() } -func (fs *bufWriteCloserSink) Close() error { +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() - fs.buf.Flush() - fs.closer.Close() - fs.out.Close() - fs.mu.Unlock() + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } return nil } -func newBufWriteCloserSink(o io.WriteCloser) Sink { +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { bufW := bufio.NewWriter(o) - return &bufWriteCloserSink{ + return &bufferedSink{ closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } - -// NewTempFileSink creates a temp file and returns a Sink that writes to this -// file. -func NewTempFileSink() (Sink, error) { - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") - if err != nil { - return nil, fmt.Errorf("failed to create temp file: %v", err) - } - return newBufWriteCloserSink(tempFile), nil -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go deleted file mode 100644 index 15dc7803d..000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "strings" -) - -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. 
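NewBufferedSink replaces the old unexported temp-file plumbing: it buffers marshaled entries, flushes every 60 seconds, and closes the underlying WriteCloser on Close. A hedged sketch that points DefaultSink at a file; DefaultSink is normally managed by the public grpc/binarylog package, so treat this as illustration only:

```
import (
	"io"
	"os"

	"google.golang.org/grpc/internal/binarylog"
)

func enableFileSink(path string) (io.Closer, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	sink := binarylog.NewBufferedSink(f) // Close() flushes and closes f
	binarylog.DefaultSink = sink
	return sink, nil
}
```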
-func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..11f91668a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,116 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import ( + "errors" + "sync" +) + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan any + closed bool + closing bool + mu sync.Mutex + backlog []any +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan any, 1)} +} + +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t any) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return errBufferClosed + } + if len(b.backlog) == 0 { + select { + case b.c <- t: + return nil + default: + } + } + b.backlog = append(b.backlog, t) + return nil +} + +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } else if b.closing && !b.closed { + close(b.c) + } +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. 
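The Put/Load/Get contract of this Unbounded buffer is easy to misuse: every successful receive from Get() must be followed by a Load() call so the next backlogged item (or, after Close(), the channel close) is delivered. A hedged consumer-loop sketch, usable only from inside the grpc module since the package is internal:

```
import "google.golang.org/grpc/internal/buffer"

func drain(b *buffer.Unbounded, handle func(any)) {
	for v := range b.Get() {
		// Required after every read; once the buffer is closed and the backlog
		// is empty, Load closes the channel and this loop ends.
		b.Load()
		handle(v)
	}
}
```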
+// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { + return b.c +} + +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return + } + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 000000000..d7e9e1d54 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + Parent *Channel + trace *ChannelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +// Implemented to make Channel implement the Identifier interface used for +// nesting. +func (c *Channel) channelzIdentifier() {} + +func (c *Channel) String() string { + if c.Parent == nil { + return fmt.Sprintf("Channel #%d", c.ID) + } + return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID) +} + +func (c *Channel) id() int64 { + return c.ID +} + +func (c *Channel) SubChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.subChans) +} + +func (c *Channel) NestedChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.nestedChans) +} + +func (c *Channel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return c.trace.copy() +} + +type ChannelMetrics struct { + // The current connectivity state of the channel. + State atomic.Pointer[connectivity.State] + // The target this channel originally tried to connect to. May be absent + Target atomic.Pointer[string] + // The number of calls started on the channel. + CallsStarted atomic.Int64 + // The number of calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed atomic.Int64 + // The last time a call was started on the channel. 
+ LastCallStartedTimestamp atomic.Int64 +} + +// CopyFrom copies the metrics in o to c. For testing only. +func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) { + c.State.Store(o.State.Load()) + c.Target.Store(o.Target.Load()) + c.CallsStarted.Store(o.CallsStarted.Load()) + c.CallsSucceeded.Store(o.CallsSucceeded.Load()) + c.CallsFailed.Store(o.CallsFailed.Load()) + c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// Equal returns true iff the metrics of c are the same as the metrics of o. +// For testing only. +func (c *ChannelMetrics) Equal(o any) bool { + oc, ok := o.(*ChannelMetrics) + if !ok { + return false + } + if (c.State.Load() == nil) != (oc.State.Load() == nil) { + return false + } + if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() { + return false + } + if (c.Target.Load() == nil) != (oc.Target.Load() == nil) { + return false + } + if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() { + return false + } + return c.CallsStarted.Load() == oc.CallsStarted.Load() && + c.CallsFailed.Load() == oc.CallsFailed.Load() && + c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() && + c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load() +} + +func strFromPointer(s *string) string { + if s == nil { + return "" + } + return *s +} + +func (c *ChannelMetrics) String() string { + return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", + c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), + ) +} + +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { + c := &ChannelMetrics{} + c.State.Store(&state) + c.Target.Store(&target) + c.CallsStarted.Store(started) + c.CallsSucceeded.Store(succeeded) + c.CallsFailed.Store(failed) + c.LastCallStartedTimestamp.Store(timestamp) + return c +} + +func (c *Channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *SubChannel: + c.subChans[id] = v.RefName + case *Channel: + c.nestedChans[id] = v.RefName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *Channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *Channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *Channel) getParentID() int64 { + if c.Parent == nil { + return -1 + } + return c.Parent.ID +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *Channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.Parent != nil { + c.Parent.deleteChild(c.ID) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. 
Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *Channel) deleteSelfFromMap() (delete bool) { + return c.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *Channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + db.deleteEntry(c.ID) + c.trace.clear() +} + +func (c *Channel) getChannelTrace() *ChannelTrace { + return c.trace +} + +func (c *Channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *Channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *Channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *Channel) getRefName() string { + return c.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go new file mode 100644 index 000000000..64c791953 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -0,0 +1,395 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sort" + "sync" + "time" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if + // child list is not empty, then deletion from the database is on hold until + // the last child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, + // and whether child list is now empty. If both conditions are met, then + // delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. 
+ getParentID() int64 + Entity +} + +// channelMap is the storage data structure for channelz. +// +// Methods of channelMap can be divided into two categories with respect to +// locking. +// +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + channels map[int64]*Channel + subChannels map[int64]*SubChannel + sockets map[int64]*Socket + servers map[int64]*Server +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*Channel), + subChannels: make(map[int64]*SubChannel), + sockets: make(map[int64]*Socket), + servers: make(map[int64]*Server), + } +} + +func (c *channelMap) addServer(id int64, s *Server) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.servers[id] = s +} + +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else if p := c.channels[pid]; p != nil { + p.addChild(id, cn) + } else { + logger.Infof("channel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + sc.trace.cm = c + c.subChannels[id] = sc + if p := c.channels[pid]; p != nil { + p.addChild(id, sc) + } else { + logger.Infof("subchannel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSocket(s *Socket) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.sockets[s.ID] = s + if s.Parent == nil { + logger.Infof("normal socket %d has no parent", s.ID) + } + s.Parent.(entry).addChild(s.ID, s) +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children and until no other +// entity's channel trace references it. It may lead to a chain of entry +// deletion. For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. 
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
+ return nil, true + } + svrskts := svr.sockets + ids := make([]int64, 0, len(svrskts)) + sks := make([]*Socket, 0, min(len(svrskts), maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + end := true + for _, v := range ids[idx:] { + if len(sks) == maxResults { + end = false + break + } + if ns, ok := c.sockets[v]; ok { + sks = append(sks, ns) + } + } + return sks, end +} + +func (c *channelMap) getChannel(id int64) *Channel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.channels[id] +} + +func (c *channelMap) getSubChannel(id int64) *SubChannel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.subChannels[id] +} + +func (c *channelMap) getSocket(id int64) *Socket { + c.mu.RLock() + defer c.mu.RUnlock() + return c.sockets[id] +} + +func (c *channelMap) getServer(id int64) *Server { + c.mu.RLock() + defer c.mu.RUnlock() + return c.servers[id] +} + +type dummyEntry struct { + // dummyEntry is a fake entry to handle entry not found case. + idNotFound int64 + Entity +} + +func (d *dummyEntry) String() string { + return fmt.Sprintf("non-existent entity #%d", d.idNotFound) +} + +func (d *dummyEntry) ID() int64 { return d.idNotFound } + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race + // condition. For example, there could be a race between ClientConn.Close() + // info being propagated to addrConn and http2Client. ClientConn.Close() + // cancel the context and result in http2Client to error. The error info is + // then caught by transport monitor and before addrConn.tearDown() is called + // in side ClientConn.Close(). Therefore, the addrConn will create a new + // transport. And when registering the new transport in channelz, its parent + // addrConn could have already been torn down and deleted from channelz + // tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// Entity is implemented by all channelz types. +type Entity interface { + isEntity() + fmt.Stringer + id() int64 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f993..078bb8123 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -16,135 +16,54 @@ * */ -// Package channelz defines APIs for enabling channelz service, entry +// Package channelz defines internal APIs for enabling channelz service, entry // registration/deletion, and accessing channelz data. It also defines channelz // metric struct formats. -// -// All APIs in this package are experimental. 
package channelz import ( - "fmt" - "sort" - "sync" "sync/atomic" "time" - "google.golang.org/grpc/grpclog" -) - -const ( - defaultMaxTraceEntry int32 = 30 + "google.golang.org/grpc/internal" ) var ( - db dbWrapper - idGen idGenerator - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. func TurnOn() { - if !IsOn() { - NewChannelzStorage() - atomic.StoreInt32(&curState, 1) + atomic.StoreInt32(&curState, 1) +} + +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) -} - -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - -// NewChannelzStorage initializes channelz data storage and id generator. -// -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) - idGen.reset() - return func() error { - var err error - cm := db.get() - if cm == nil { - return nil - } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
- return nil - } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) - } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err - } + return atomic.LoadInt32(&curState) == 1 } // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a @@ -152,576 +71,160 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to +// SocketMetrics, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) } -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) } -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) +// GetSubChannel returns the SubChannel for the subchannel (identified by id). 
+func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) } -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) -} - -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { - id := idGen.genID() - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), - nestedChans: make(map[int64]string), - id: id, - pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id +func GetServer(id int64) *Server { + return db.getServer(id) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") - return 0 - } - id := idGen.genID() - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - db.get().addSubChannel(id, sc, pid, ref) - return id -} +// RegisterChannel registers the given channel c in the channelz database with +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(parent *Channel, target string) *Channel { + id := IDGen.genID() -// RegisterServer registers the given server s in channelz database. It returns -// the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { - id := idGen.genID() - svr := &server{ - refName: ref, - s: s, - sockets: make(map[int64]string), - listenSockets: make(map[int64]string), - id: id, + if !IsOn() { + return &Channel{ID: id} } - db.get().addServer(id, svr) - return id -} -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. 
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") - return 0 - } - id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id -} + isTopChannel := parent == nil -// RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") - return 0 + cn := &Channel{ + ID: id, + RefName: target, + nestedChans: make(map[int64]string), + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id -} - -// RemoveEntry removes an entry with unique channelz trakcing id to be id from -// channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { - if getMaxTraceEntry() == 0 { - return +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { + id := IDGen.genID() + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, } - db.get().traceEvent(id, desc) -} -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) + if !IsOn() { + return sc } - c.mu.Unlock() -} -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(ref string) *Server { + id := IDGen.genID() + if !IsOn() { + return &Server{ID: id} } -} -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v + svr := &Server{ + RefName: ref, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + ID: id, } - return &dummyEntry{idNotFound: id} + db.addServer(id, svr) + return svr } -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return +// RegisterSocket registers the given normal socket s in channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) } + return skt } -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() +// RemoveEntry removes an entry with unique channelz tracking id to be id from +// channelz database. +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id int64) { + if !IsOn() { return } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n + db.removeEntry(id) } -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, 
bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - var s []*SocketMetric - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. 
- chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm -} - -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..ee4d72125 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) +} + +// Warningf logs and adds a trace event if channelz is on. 
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 000000000..cdfc49d6e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// Server is the channelz representation of a server. +type Server struct { + Entity + ID int64 + RefName string + + ServerMetrics ServerMetrics + + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + cm *channelMap +} + +// ServerMetrics defines a struct containing metrics for servers. +type ServerMetrics struct { + // The number of incoming calls started on the server. + CallsStarted atomic.Int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. 
+func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 000000000..fa64834b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. 
+ // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. + Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 000000000..3b88e4cba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. 
+ RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go similarity index 83% rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go index 692dd6181..5ac73ff83 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -51,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { s.TCPInfo = v } } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go similarity index 79% rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index 79edbefc4..0e6e18e18 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -1,4 +1,4 @@ -// +build !linux appengine +//go:build !linux /* * @@ -22,8 +22,6 @@ package channelz import ( "sync" - - "google.golang.org/grpc/grpclog" ) var once sync.Once @@ -37,8 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 000000000..36b867403 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. 
+// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 17c2274cb..000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,702 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -// entry represents a node in the channelz database. 
-type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. - getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. 
- Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. 
-type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. - traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUNKNOWN indicates unknown severity of a trace event. - CtUNKNOWN Severity = iota - // CtINFO indicates info level severity of a trace event. - CtINFO - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel -) - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..9deee7f65 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..25ade6230 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. 
+ if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go similarity index 94% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn.go rename to vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index 2f4472bec..2919632d6 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -18,8 +16,7 @@ * */ -// Package internal contains credentials-internal code. -package internal +package credentials import ( "net" diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..f792fd22c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3ee8740f1..452985f8d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,15 +21,56 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" -) - var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". 
- Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..dd314cfb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..29f234acb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. 
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 000000000..7617be215
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+ // WithBufferPool is implemented by the grpc package and returns a dial
+ // option to configure a shared buffer pool for a grpc.ClientConn.
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+ // BufferPool is implemented by the grpc package and returns a server
+ // option to configure a shared buffer pool for a grpc.Server.
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
new file mode 100644
index 000000000..43423d8ad
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package googlecloud contains internal helpful functions for google cloud.
+package googlecloud
+
+import (
+ "runtime"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+const logPrefix = "[googlecloud]"
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+
+ logger = internalgrpclog.NewPrefixLogger(grpclog.Component("googlecloud"), logPrefix)
+)
+
+// OnGCE returns whether the client is running on GCE.
+//
+// It provides similar functionality as metadata.OnGCE from the cloud library
+// package. We keep this to avoid depending on the cloud library module.
+func OnGCE() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") + return name == "Google" + default: + return false + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go similarity index 73% rename from vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go rename to vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go index 8864a0811..ffa0f1dde 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -1,8 +1,9 @@ -// +build !linux appengine +//go:build !(linux || windows) +// +build !linux,!windows /* * - * Copyright 2018 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,9 +19,8 @@ * */ -package channelz +package googlecloud -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { - return nil +func manufacturer() ([]byte, error) { + return nil, nil } diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go similarity index 58% rename from vendor/google.golang.org/grpc/credentials/tls13.go rename to vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go index ccbf35b33..6e455fb0a 100644 --- a/vendor/google.golang.org/grpc/credentials/tls13.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -1,8 +1,6 @@ -// +build go1.12 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,12 @@ * */ -package credentials +package googlecloud + +import "os" -import "crypto/tls" +const linuxProductNameFile = "/sys/class/dmi/id/product_name" -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) } diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 000000000..2d7aaaaa7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go new file mode 100644 index 000000000..092ad187a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger grpclog.DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// V reports whether verbosity level l is at least the requested verbose level. 
+func (pl *PrefixLogger) V(l int) bool { + if pl != nil { + return pl.logger.V(l) + } + return true +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 200b115ca..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - res := r.Float64() - mu.Unlock() - return res -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 000000000..19b9d6392 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. 
+ done chan struct{}
+
+ callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shutdown the CallbackSerializer. It is guaranteed that no
+// callbacks will be added once this context is canceled, and any pending un-run
+// callbacks will be executed before the serializer is shut down.
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+ cs := &CallbackSerializer{
+ done: make(chan struct{}),
+ callbacks: buffer.NewUnbounded(),
+ }
+ go cs.run(ctx)
+ return cs
+}
+
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
+}
+
+func (cs *CallbackSerializer) run(ctx context.Context) {
+ defer close(cs.done)
+
+ // TODO: when Go 1.21 is the oldest supported version, this loop and Close
+ // can be replaced with:
+ //
+ // context.AfterFunc(ctx, cs.callbacks.Close)
+ for ctx.Err() == nil {
+ select {
+ case <-ctx.Done():
+ // Do nothing here. Next iteration of the for loop will not happen,
+ // since ctx.Err() would be non-nil.
+ case cb := <-cs.callbacks.Get():
+ cs.callbacks.Load()
+ cb.(func(context.Context))(ctx)
+ }
+ }
+
+ // Close the buffer to prevent new callbacks from being added.
+ cs.callbacks.Close()
+
+ // Run all pending callbacks.
+ for cb := range cs.callbacks.Get() {
+ cs.callbacks.Load()
+ cb.(func(context.Context))(ctx)
+ }
+}
+
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+ return cs.done
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
similarity index 59%
rename from vendor/google.golang.org/grpc/internal/channelz/util_linux.go
rename to vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
index fdf409d55..6635f7bca 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
@@ -1,8 +1,6 @@
-// +build linux,!appengine
-
 /*
 *
- * Copyright 2018 gRPC authors.
+ * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,22 +16,17 @@ * */ -package channelz +package grpcsync import ( - "syscall" + "sync" ) -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) } - return nil } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 000000000..6d8c2f518 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. 
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..e8d866984 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
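A rough usage sketch for the PubSub type above: a Subscriber's OnMessage callback is invoked asynchronously, in publication order, and Done closes once the backing serializer has drained. The stateWatcher type is invented for illustration, and the snippet assumes it lives inside the google.golang.org/grpc module because internal/... packages are not importable elsewhere.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcsync"
)

// stateWatcher implements grpcsync.Subscriber; OnMessage must not block.
type stateWatcher struct{}

func (stateWatcher) OnMessage(msg any) { fmt.Println("got:", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	// The watcher receives the most recent message (if any) and then every
	// newly published one.
	unsub := ps.Subscribe(stateWatcher{})
	ps.Publish("READY")
	ps.Publish("IDLE")

	time.Sleep(50 * time.Millisecond) // deliveries are asynchronous
	unsub()
	cancel()
	<-ps.Done() // closed once all queued deliveries have run
}
```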
+ * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000..e2f948e8f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. 
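EncodeDuration above picks the smallest time unit whose value still fits in the grpc-timeout header's eight-digit limit. A hedged illustration of the resulting encodings (not part of the vendored code; same inside-the-grpc-module assumption as before):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	fmt.Println(grpcutil.EncodeDuration(0))                      // "0n"
	fmt.Println(grpcutil.EncodeDuration(100 * time.Microsecond)) // "100000n"
	fmt.Println(grpcutil.EncodeDuration(250 * time.Millisecond)) // "250000u"
	fmt.Println(grpcutil.EncodeDuration(2 * time.Hour))          // "7200000m"
}
```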
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..ec62b4775 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
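The grpcutil helpers above (WithExtraMetadata/ExtraMetadata and ParseMethod) are small enough that a usage sketch covers them; illustrative only, with the same inside-module assumption:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
	"google.golang.org/grpc/metadata"
)

func main() {
	svc, method, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(svc, method, err) // helloworld.Greeter SayHello <nil>

	// Attach extra metadata to a context and read it back.
	ctx := grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs("k", "v"))
	md, ok := grpcutil.ExtraMetadata(ctx)
	fmt.Println(md, ok) // map[k:[v]] true
}
```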
+func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 000000000..7a092b2b8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 000000000..fe49cb74c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. 
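To make the content-type rules above concrete, a short sketch of ContentSubtype/ContentType behavior (illustrative only; inside-module assumption as before):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	fmt.Println(grpcutil.ContentSubtype("application/grpc"))       // "" true
	fmt.Println(grpcutil.ContentSubtype("application/grpc+proto")) // "proto" true
	fmt.Println(grpcutil.ContentSubtype("application/json"))       // "" false

	fmt.Println(grpcutil.ContentType("proto")) // "application/grpc+proto"
	fmt.Println(grpcutil.ContentType(""))      // "application/grpc"
}
```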
+type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. 
+ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. 
+ m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index bc1f99ac8..7aae9240f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -25,13 +25,12 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" ) var ( - // WithResolverBuilder is exported by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption - // WithHealthCheckFunc is not exported by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -39,17 +38,200 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfig is a function to parse JSON service configs into - // opaque data structures. - ParseServiceConfig func(sc string) (interface{}, error) - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. 
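The idle.Manager added above coordinates with its Enforcer (grpc.ClientConn in practice) via OnCallBegin/OnCallEnd. A minimal sketch with a stand-in enforcer; loggingEnforcer is invented for illustration, the snippet is not part of the vendored diff, and it assumes it compiles inside the grpc module:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// loggingEnforcer stands in for the grpc.ClientConn side of the contract.
type loggingEnforcer struct{}

func (loggingEnforcer) ExitIdleMode() error { fmt.Println("exit idle"); return nil }
func (loggingEnforcer) EnterIdleMode()      { fmt.Println("enter idle") }

func main() {
	m := idle.NewManager(loggingEnforcer{}, 200*time.Millisecond)
	defer m.Close()

	// The manager starts idle; the first RPC triggers ExitIdleMode.
	if err := m.OnCallBegin(); err != nil {
		fmt.Println("RPC rejected:", err)
		return
	}
	m.OnCallEnd()

	// With no further activity, EnterIdleMode is invoked after roughly one
	// to two idle-timeout periods.
	time.Sleep(time.Second)
}
```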
- StatusRawProto interface{} // func (*status.Status) *spb.Status + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig any // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder any // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). 
+ // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. + UserSetDefaultScheme = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. +// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report @@ -57,7 +239,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. @@ -69,3 +251,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. 
+// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..900bfb716 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate validates every pair in md with ValidatePair. +func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). 
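Most of the variables in internal.go above follow the same pattern: a hook is declared as an untyped any, assigned by the package that owns the concrete types, and type-asserted back to its documented signature at the call site, which avoids import cycles. A self-contained, single-file sketch of that pattern (the names here are invented for illustration):

```go
package main

import "fmt"

// In grpc this would be declared in the internal package, e.g.
//   CanonicalString any // func (codes.Code) string
var canonicalString any

// The owning package (simulated inline here) installs the implementation.
func init() {
	canonicalString = func(code int) string { return fmt.Sprintf("code(%d)", code) }
}

func main() {
	// Callers assert the documented signature before invoking the hook.
	if f, ok := canonicalString.(func(int) string); ok {
		fmt.Println(f(5)) // code(5)
	}
}
```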
+func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..dbee7a60d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/protoadapt" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e any) string { + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { + mm := protojson.MarshalOptions{ + Indent: jsonIndent, + Multiline: true, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..f0603871c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
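A brief sketch of the metadata validation rules and the pretty helpers above (illustrative only; inside-module assumption as before):

```go
package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/internal/pretty"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Lowercase keys restricted to [0-9a-z-_.] pass validation.
	fmt.Println(imetadata.Validate(metadata.Pairs("user-agent", "demo"))) // <nil>
	// Uppercase characters in a key are rejected.
	fmt.Println(imetadata.Validate(metadata.MD{"Bad-Key": {"v"}})) // non-nil error

	// Non-proto values are marshaled as indented JSON.
	fmt.Println(pretty.ToJSON(map[string]int{"attempts": 3}))
}
```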
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. 
+ // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. 
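A minimal ConfigSelector implementation and SafeConfigSelector usage, to make the swap-and-wait semantics above concrete; noopSelector is invented for illustration, and the snippet again assumes it lives inside the grpc module:

```go
package main

import (
	"fmt"

	iresolver "google.golang.org/grpc/internal/resolver"
)

// noopSelector returns a bare per-RPC config for every method.
type noopSelector struct{}

func (noopSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: ri.Context}, nil
}

func main() {
	var scs iresolver.SafeConfigSelector
	// UpdateConfigSelector blocks until in-flight SelectConfig calls finish.
	scs.UpdateConfigSelector(noopSelector{})

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{Method: "/Service/Method"})
	fmt.Println(cfg != nil, err) // true <nil>
}
```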
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..4552db16b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/resolver/dns/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) + +func init() { + resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. 
+ txtAttribute = "grpc_config=" +) + +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, address) + } +} + +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: internal.AddressDialer(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver internal.NetResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. 
+ d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var nextResolutionTime time.Time + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) + select { + case <-d.ctx.Done(): + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + return + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): + } + } +} + +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service + // config. 
+ return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT(ctx) + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", internal.ErrMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. 
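The lookups above only need LookupHost, LookupSRV, and LookupTXT, which is what lets tests swap the real resolver for a stub through the NewNetResolver override. A minimal sketch of such a stub; the interface is re-declared locally so the snippet stands alone, and the returned addresses are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
)

// netResolver re-declares the lookup surface used above so the sketch stands
// alone; the real interface lives in the internal dns resolver package.
type netResolver interface {
	LookupHost(ctx context.Context, host string) ([]string, error)
	LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error)
	LookupTXT(ctx context.Context, name string) ([]string, error)
}

// stubResolver returns canned answers, the way a test double would.
type stubResolver struct{}

func (stubResolver) LookupHost(context.Context, string) ([]string, error) {
	return []string{"10.0.0.1", "10.0.0.2"}, nil
}

func (stubResolver) LookupSRV(context.Context, string, string, string) (string, []*net.SRV, error) {
	return "", nil, nil
}

func (stubResolver) LookupTXT(context.Context, string) ([]string, error) {
	return []string{"grpc_config=[]"}, nil
}

func main() {
	var r netResolver = stubResolver{}
	addrs, err := r.LookupHost(context.Background(), "greeter.example.com")
	fmt.Println(addrs, err)
}
```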
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return rand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c0eae4f5f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. 
"::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go similarity index 78% rename from vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go rename to vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 893d5d12c..b901c7bac 100644 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} -func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,10 +52,10 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..27cd81af9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
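Both the passthrough and unix resolvers above are exercised purely through the target string handed to the client. A small sketch with placeholder addresses and socket paths:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	creds := grpc.WithTransportCredentials(insecure.NewCredentials())
	for _, target := range []string{
		"passthrough:///10.0.0.1:50051", // address handed back verbatim by the passthrough resolver
		"unix:///var/run/app.sock",      // endpoint taken from target.URL.Path
		"unix-abstract:app-socket",      // endpoint taken from target.URL.Opaque, prefixed with '@'
	} {
		cc, err := grpc.NewClient(target, creds)
		if err != nil {
			log.Fatalf("NewClient(%q): %v", target, err)
		}
		_ = cc.Close()
	}
}
```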
+ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 000000000..11d82afcc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
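MarshalJSON above always emits seconds with 0, 3, 6, or 9 fractional digits and an "s" suffix, per the protobuf JSON mapping. A standalone sketch of that formatting rule for non-negative durations:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// format applies the same trimming rule as MarshalJSON above, for
// non-negative durations: print nine fractional digits, then drop trailing
// groups of three zeros.
func format(d time.Duration) string {
	ns := d.Nanoseconds()
	s := fmt.Sprintf("%d.%09d", ns/int64(time.Second), ns%int64(time.Second))
	s = strings.TrimSuffix(s, "000")
	s = strings.TrimSuffix(s, "000")
	s = strings.TrimSuffix(s, ".000")
	return s + "s"
}

func main() {
	fmt.Println(format(3 * time.Second))         // 3s
	fmt.Println(format(1500 * time.Millisecond)) // 1.500s
	fmt.Println(format(time.Nanosecond))         // 0.000000001s
}
```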
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..51e733e49 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. 
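UnmarshalJSON above scans the loadBalancingConfig list in order and stops at the first policy whose balancer is registered. A sketch of the JSON shape it consumes, with a made-up policy name that would simply be skipped:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A hypothetical loadBalancingConfig list as it appears in service config
// JSON. "some_unknown_policy" has no registered balancer builder and is
// skipped; "round_robin" is the first supported entry and gets selected.
const lbConfig = `[
  {"some_unknown_policy": {"foo": "bar"}},
  {"round_robin": {}}
]`

func main() {
	// The same intermediate shape the UnmarshalJSON above decodes into:
	// a slice of single-entry maps from policy name to raw config.
	var entries []map[string]json.RawMessage
	if err := json.Unmarshal([]byte(lbConfig), &entries); err != nil {
		panic(err)
	}
	for _, e := range entries {
		for name, cfg := range e {
			fmt.Printf("policy %q config %s\n", name, cfg)
		}
	}
}
```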
+ MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 000000000..fd33af51a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. +package stats + +import "context" + +// Labels are the labels for metrics. +type Labels struct { + // TelemetryLabels are the telemetry labels to record. + TelemetryLabels map[string]string +} + +type labelsKey struct{} + +// GetLabels returns the Labels stored in the context, or nil if there is one. +func GetLabels(ctx context.Context) *Labels { + labels, _ := ctx.Value(labelsKey{}).(*Labels) + return labels +} + +// SetLabels sets the Labels in the context. +func SetLabels(ctx context.Context, labels *Labels) context.Context { + // could also append + return context.WithValue(ctx, labelsKey{}, labels) +} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 000000000..be110d41f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,95 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. +// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..757925381 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,205 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. 
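The exported google.golang.org/grpc/status package is the usual way to build these values from application code. A short usage sketch, assuming the errdetails proto package is available; the user-lookup service is a placeholder:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookupUser(id string) error {
	st := status.New(codes.NotFound, "user not found")
	// WithDetails attaches typed messages to the Details field of the
	// underlying status proto.
	detailed, err := st.WithDetails(&errdetails.ResourceInfo{
		ResourceType: "user",
		ResourceName: id,
	})
	if err != nil {
		return st.Err() // fall back to the bare status
	}
	return detailed.Err()
}

func main() {
	err := lookupUser("42")
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message(), st.Details())
	}
}
```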
+func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + m, err := anypb.New(protoadapt.MessageV2Of(detail)) + if err != nil { + return nil, err + } + p.Details = append(p.Details, m) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } + details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail, err := any.UnmarshalNew() + if err != nil { + details = append(details, err) + continue + } + details = append(details, detail) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 43281a3e0..b3a72276d 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -32,35 +30,35 @@ import ( "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // GetCPUTime returns the how much CPU time has passed since the start of this process. 
func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) + logger.Fatal(err) } return ts.Nano() } -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. -type Rusage syscall.Rusage +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage // GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab3..54c24c2ff 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -18,6 +19,8 @@ * */ +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. package syscall import ( @@ -29,45 +32,46 @@ import ( ) var once sync.Once +var logger = grpclog.Component("core") func log() { once.Do(func() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +// between two Rusage structs. It a no-op function for non-linux environments. 
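GetCPUTime and GetRusage above are thin wrappers over the raw calls. A Linux-only sketch of the same two calls using golang.org/x/sys/unix and the standard syscall package:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// Process CPU time, as GetCPUTime reads it.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
		panic(err)
	}
	fmt.Println("cpu time (ns):", ts.Nano())

	// Resource usage, the struct CPUTimeDiff subtracts between two snapshots.
	var ru syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru); err != nil {
		panic(err)
	}
	fmt.Printf("user: %v.%06d s, system: %v.%06d s\n",
		ru.Utime.Sec, ru.Utime.Usec, ru.Stime.Sec, ru.Stime.Usec)
}
```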
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +// SetTCPUserTimeout is a no-op function under non-linux environments. +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go similarity index 74% rename from vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go index d4346e9ea..4f347edd4 100644 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -1,8 +1,7 @@ -// +build appengine +//go:build !unix && !windows /* - * - * Copyright 2018 gRPC authors. + * Copyright 2023 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +23,7 @@ import ( "net" ) -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000..7e7aaa546 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. 
It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 000000000..d5c1085ee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. 
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ddee20b6b..ef72fbb3a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -20,13 +20,20 @@ package transport import ( "bytes" + "errors" "fmt" + "net" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -34,7 +41,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -43,7 +50,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -55,11 +62,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -128,13 +135,23 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -177,7 +194,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -195,6 +212,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -265,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. 
+// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -284,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-*ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { - var wakeUp bool +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { - if !f(it) { // f wasn't successful - c.mu.Unlock() + if !f() { // f wasn't successful return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -339,88 +380,98 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. 
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - -func (c *controlBuffer) get(block bool) (interface{}, error) { +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: - c.finish() - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") + } + } +} + +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) } + c.transportResponseFrames-- } + return h, nil } +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } - c.mu.Unlock() + + // In case throttle() is currently in flight, it needs to be unblocked. 
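trfChan above is an atomic pointer to a channel: nil means unthrottled, while a stored channel makes throttle() block until the pointer is swapped back to nil and the channel is closed. A standalone sketch of that gating pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// gate mimics trfChan: a nil pointer means "no throttling"; a stored channel
// blocks waiters until it is swapped out and closed.
type gate struct {
	ch atomic.Pointer[chan struct{}]
}

func (g *gate) block() {
	ch := make(chan struct{})
	g.ch.Store(&ch)
}

func (g *gate) unblock() {
	if ch := g.ch.Swap(nil); ch != nil {
		close(*ch)
	}
}

func (g *gate) wait() {
	if ch := g.ch.Load(); ch != nil {
		<-*ch
	}
}

func main() {
	var g gate
	g.block()
	go func() {
		time.Sleep(10 * time.Millisecond)
		g.unblock()
	}()
	g.wait() // returns once unblock closes the channel
	fmt.Println("unthrottled")
}
```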
+ // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch := c.trfChan.Swap(nil) + if ch != nil { + close(*ch) + } } type side int @@ -436,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -458,24 +509,31 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -493,21 +551,25 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. 
User closed the connection. - // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -552,7 +614,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -561,11 +622,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -573,10 +634,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -584,13 +644,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -598,14 +656,15 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + } return nil } // Case 1.A: Server is responding back with headers. @@ -631,19 +690,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -658,7 +718,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) + } } } var ( @@ -695,10 +757,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -707,7 +769,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -718,9 +779,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -731,14 +791,46 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } } return nil } @@ -747,7 +839,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it. 
+ return errors.New("received GOAWAY with no active streams") } } return nil @@ -765,10 +858,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -778,25 +871,32 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. + return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -815,7 +915,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -832,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -849,63 +950,71 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) } return false, nil } - var ( - idx int - buf []byte - ) - if len(dataItem.h) != 0 { // data header has not been written out yet. 
- buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } + + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil - } else if strQuota < size { - size = strQuota + } else if maxSize > strQuota { + maxSize = strQuota } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) + if hSize != 0 && dSize == 0 { + buf = &dataItem.h + } else { + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) + + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) } + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. - if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } + if dataItem.endStream && remainingBytes == 0 { + endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } - buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } + dataItem.h = dataItem.h[hSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e..bc8ee0747 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. 
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 78f9ddc3d..ce878693b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -35,50 +34,80 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") - } - if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusMethodNotAllowed) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before - contentSubtype, validContentType := contentSubtype(contentType) + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) + } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -96,7 +125,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -112,14 +143,15 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. 
type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -138,17 +170,28 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler -} + stats []stats.Handler + logger *grpclog.PrefixLogger -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil + bufferPool mem.BufferPool } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) +} -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -186,8 +229,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } // And flush, in case no header or body has been sent yet. // This forces a separation of headers and trailers if this is the @@ -200,18 +246,21 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -226,22 +275,28 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). 
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -260,45 +315,78 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - return ht.do(func() { - ht.writeCommonHeaders(s) +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + defer data.Free() + if !headersWritten { + ht.writePendingHeaders(s) + } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } + if !headersWritten { + ht.writePendingHeaders(s) } + ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { - if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } } return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
- - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -315,40 +403,25 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - ht.stats.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -357,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -404,17 +475,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 9bd8c27b3..c769deab5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,8 @@ import ( "io" "math" "net" + "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -32,25 +34,48 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/syscall" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" + isyscall "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +var goAwayLoopyWriterTimeout = 5 * time.Second + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string - md interface{} + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address + md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr @@ -66,6 +91,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -75,13 +101,10 @@ type http2Client struct { perRPCCreds []credentials.PerRPCCredentials - // Boolean to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. 
kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -89,18 +112,16 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. @@ -108,6 +129,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -119,21 +143,44 @@ type http2Client struct { // variable. kpDormant bool - // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number - czData *channelzData + channelz *channelz.Socket - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool + + connectionID uint64 + logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) if fn != nil { - return fn(ctx, addr) + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -155,7 +202,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -164,19 +211,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } }() - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancellation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. 
if kp.Time == 0 { @@ -187,7 +266,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -208,12 +287,26 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } } dynamicWindow := true icwz := int32(initialWindowSize) @@ -227,12 +320,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } + t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, - md: addr.Metadata, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -240,25 +335,46 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, + onClose: onClose, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client 
context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -271,37 +387,47 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) - } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + sh.HandleConn(t.ctx, connBegin) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err } var ss []http2.Setting @@ -319,29 +445,33 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } err = t.framer.fr.WriteSettings(ss...) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err } // Adjust the connection flow control window if needed. 
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err } } + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) - } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. t.conn.Close() } close(t.writerDone) @@ -352,12 +482,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ + ct: t, done: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -375,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -385,23 +516,36 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { } func (t *http2Client) getPeer() *peer.Peer { - pr := &peer.Peer{ - Addr: t.remoteAddr, + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo +} + +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. 
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err } - return pr + return false, g.closeConn } func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) - authData, err := t.getTrAuthData(ctx, aud) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err } - callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) if err != nil { return nil, err } @@ -415,21 +559,35 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.PreviousAttempts > 0 { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
timeout := time.Until(dl) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) } for k, v := range authData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) @@ -444,7 +602,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -458,25 +616,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for _, vv := range added { for i, v := range vv { if i%2 == 0 { - k = v + k = strings.ToLower(v) continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields, nil @@ -505,11 +661,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -526,12 +686,22 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. 
if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -543,13 +713,46 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. 
+ if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -569,22 +772,18 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } - t.activeStreams[id] = s if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) } // If the keepalive goroutine has gone dormant, wake it up. if t.kpDormant { @@ -598,7 +797,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } firstTry := true var ch chan struct{} - checkForStreamQuota := func(it interface{}) bool { + transportDrainRequired := false + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -610,11 +810,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID + + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + + hdr.streamID = t.nextID t.nextID += 2 - s.id = h.streamID + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.activeStreams[s.id] = s + t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -624,13 +837,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -639,44 +851,57 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { - return nil, err + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() } return s, nil } @@ -729,16 +954,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
t.mu.Unlock() if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } }, rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -751,25 +976,30 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +// accessed anymore. +func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() - return nil + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -779,23 +1009,45 @@ func (t *http2Client) Close() error { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + t.conn.Close() + channelz.RemoveEntry(t.channelz.ID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. 
for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -810,11 +1062,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -822,35 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, + h: hdr, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -882,13 +1139,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() - updateIWS := func(interface{}) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -955,15 +1212,18 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? 
if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -979,13 +1239,15 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } statusCode = codes.Unknown } if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1033,7 +1295,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1060,13 +1322,17 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1084,18 +1350,20 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. 
// Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1103,24 +1371,33 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + streamsToClose = append(streamsToClose, stream) } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1130,12 +1407,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1162,93 +1444,207 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) return } - isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) } - }() + } - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + close(s.headerChan) + } + } + + for _, sh := range t.statsHandlers { + if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, } + sh.HandleRPC(s.ctx, inHeader) } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) } - close(s.headerChan) } if !endStream { return } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. 
If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1261,15 +1657,20 @@ func (t *http2Client) reader() { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - msg := t.framer.fr.ErrorDetail().Error() + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close() - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1287,22 +1688,41 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } } } } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() timer := time.NewTimer(t.kp.Time) for { select { case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } t.mu.Lock() if t.state == closing { // If the transport is closing, we should exit from the @@ -1315,36 +1735,36 @@ func (t *http2Client) keepalive() { return } if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. 
+ outstandingPing = false t.kpDormant = true t.kpDormancyCond.Wait() } t.kpDormant = false t.mu.Unlock() - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } // We get here either because we were dormant and a new stream was // created which unblocked the Wait() call, or because the // keepalive timer expired. In both cases, we need to send a ping. - t.controlBuf.put(p) - - timer.Reset(t.kp.Timeout) - select { - case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) - continue - } - infof("transport: closing client transport due to idleness.") - t.Close() - return - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } - return + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := min(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) case <-t.ctx.Done(): if !timer.Stop() { <-timer.C @@ -1362,40 +1782,23 @@ func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { @@ -1412,3 +1815,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git 
a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 33686a11a..584b50fe5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,23 +25,27 @@ import ( "fmt" "io" "math" + "math/rand" "net" + "net/http" "strconv" "sync" "sync/atomic" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -53,42 +57,36 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") - // statusRawProto is a function to get to the raw status proto wrapped in a - // status.Status without a proto.Clone(). - statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler - // Flag to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. 
+ stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters - // Keepalive enforcement policy. kep keepalive.EnforcementPolicy // The time instance last ping was received. @@ -105,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -121,35 +119,59 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number - czData *channelzData - bufferPool *bufferPool + channelz *channelz.Socket + bufferPool mem.BufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. 
+ if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -174,6 +196,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err Val: *config.MaxHeaderListSize, }) } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } @@ -201,34 +229,59 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: context.Background(), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - maxStreams: maxStreams, + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), - bufferPool: newBufferPool(), - } + bufferPool: config.BufferPool, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) + t.logger = prefixLoggerForServerTransport(t) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -236,28 +289,27 @@ func 
newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) - } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) - } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. + if err == io.EOF { + return nil, io.EOF + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -271,7 +323,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) } - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) sf, ok := frame.(*http2.SettingsFrame) if !ok { return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) @@ -279,96 +331,224 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-t.readerDone: + case <-timer.C: + } + t.conn.Close() } - t.conn.Close() - close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if err := state.decodeHeader(frame); err != nil { - if se, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: statusCodeConvTab[se.Code()], - onWrite: func() {}, - }) - } - return false + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } } - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. 
- s.state = streamReadDone + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil } - pr := &peer.Peer{ - Addr: t.remoteAddr, + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } - s.ctx = peer.NewContext(s.ctx, pr) - // Attach the received metadata to the context. - if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(ctx) } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, + + // Attach the received metadata to the context. 
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -379,48 +559,64 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } - if streamID%2 != 1 || streamID <= t.maxStreamID { + if httpMethod != http.MethodPost { t.mu.Unlock() - // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) s.cancel() - return true + return nil + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return nil + } } - t.maxStreamID = streamID t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - } - t.stats.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -432,21 +628,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams 
receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) + } t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() @@ -462,19 +663,20 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } continue } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() - break + if err := t.operateHeaders(ctx, frame, handle); err != nil { + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. + t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue } case *http2.DataFrame: t.handleData(frame) @@ -489,7 +691,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) + } } } } @@ -591,6 +795,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) { if !ok { return } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } if size > 0 { if err := s.fc.onData(size); err != nil { t.closeStream(s, true, http2.ErrCodeFlowControl, false) @@ -605,13 +813,16 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. 
s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -651,7 +862,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -668,8 +879,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -711,8 +922,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - errorf("transport: Got too many pings from the client, closing the connection.") - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -736,7 +946,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -744,19 +954,36 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } return false } } return true } -// WriteHeader sends the header metedata md back to the client. +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + +// WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -765,10 +992,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } - s.hdrMu.Unlock() return nil } @@ -781,17 +1011,18 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -799,11 +1030,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { - // Note: WireLength is not set in outHeader. - // TODO(mmukhi): Revisit this later, if needed. - outHeader := &stats.OutHeader{} - t.stats.HandleRPC(s.Context(), outHeader) + for _, sh := range t.stats { + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -813,34 +1047,39 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -852,8 +1091,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -864,58 +1105,49 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + for _, sh := range t.stats { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + _ = reader.Close() + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + _ = reader.Close() + return t.streamContextErr(s) } } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() + return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -926,32 +1158,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e // after an additional duration of keepalive.Timeout. 
func (t *http2Server) keepalive() { p := &ping{} - var pingSent bool - maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) - maxAge := time.NewTimer(t.kp.MaxConnectionAge) - keepalive := time.NewTimer(t.kp.Time) - // NOTE: All exit paths of this function should reset their - // respective timers. A failure to do so will cause the - // following clean-up to deadlock and eventually leak. + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) defer func() { - if !maxIdle.Stop() { - <-maxIdle.C - } - if !maxAge.Stop() { - <-maxAge.C - } - if !keepalive.Stop() { - <-keepalive.C - } + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() }() + for { select { - case <-maxIdle.C: + case <-idleTimer.C: t.mu.Lock() idle := t.idle if idle.IsZero() { // The connection is non-idle. t.mu.Unlock() - maxIdle.Reset(t.kp.MaxConnectionIdle) + idleTimer.Reset(t.kp.MaxConnectionIdle) continue } val := t.kp.MaxConnectionIdle - time.Since(idle) @@ -959,44 +1194,53 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) - // Resetting the timer so that the clean-up doesn't deadlock. - maxIdle.Reset(infinity) + t.Drain("max_idle") return } - maxIdle.Reset(val) - case <-maxAge.C: - t.drain(http2.ErrCodeNo, []byte{}) - maxAge.Reset(t.kp.MaxConnectionAgeGrace) + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain("max_age") + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { - case <-maxAge.C: + case <-ageTimer.C: // Close the connection after grace period. - infof("transport: closing server transport due to maximum connection age.") - t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. - maxAge.Reset(infinity) + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") + } + t.controlBuf.put(closeConnection{}) case <-t.done: } return - case <-keepalive.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - pingSent = false - keepalive.Reset(t.kp.Time) + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } - if pingSent { - infof("transport: closing server transport due to idleness.") - t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. 
- keepalive.Reset(infinity) + if outstandingPing && kpTimeoutLeft <= 0 { + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } - pingSent = true - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true } - t.controlBuf.put(p) - keepalive.Reset(t.kp.Timeout) + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := min(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) case <-t.done: return } @@ -1006,11 +1250,14 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1018,27 +1265,18 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } + channelz.RemoveEntry(t.channelz.ID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { - connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) - } - return err } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1051,15 +1289,20 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1079,6 +1322,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. 
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1090,22 +1338,14 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - -func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1113,49 +1353,52 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + t.framer.writer.Flush() + if retErr != nil { + return false, retErr } return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. 
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1165,38 +1408,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, nil } -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) } func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) } func (t *http2Server) getOutFlowWindow() int64 { @@ -1214,12 +1440,36 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { + return &peer.Peer{ + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. 
+func SetConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 8f5f3349d..3613d7b64 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,37 +20,30 @@ package transport import ( "bufio" - "bytes" "encoding/base64" + "errors" "fmt" "io" "math" "net" "net/http" + "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - baseContentType = "application/grpc" ) var ( @@ -71,13 +64,6 @@ var ( http2.ErrCodeInadequateSecurity: codes.PermissionDenied, http2.ErrCodeHTTP11Required: codes.Internal, } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. HTTPStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. @@ -99,51 +85,7 @@ var ( } ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. 
-type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the @@ -160,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -182,54 +123,6 @@ func isWhitelistedHeader(hdr string) bool { } } -// contentSubtype returns the content-subtype for the given content-type. The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. -func contentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// contentSubtype is assumed to be lowercase -func contentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} - -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -259,166 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return d.data.grpcErr - } - if d.serverSide { - return nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. 
- // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. -func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) - } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := contentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - return - } - d.addMetadata(f.Name, v) - } -} - type timeoutUnit uint8 const ( @@ -449,41 +182,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { return } -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. 
- return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { @@ -538,13 +236,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -553,14 +251,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -578,83 +276,127 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int conn net.Conn err error - - onFlush func() } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
- return w.conn.Write(b) + n, err := w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.Flush() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -662,7 +404,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -675,3 +421,48 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +func getWriteBufferPool(size int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + +// parseDialTarget returns the network and address to pass to dialer. 
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go deleted file mode 100644 index 879df80c4..000000000 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. - -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 000000000..42ed2b07a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..c11b52782 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go similarity index 70% rename from vendor/google.golang.org/grpc/proxy.go rename to vendor/google.golang.org/grpc/internal/transport/proxy.go index f8f69bfb7..54b224436 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -16,31 +16,30 @@ * */ -package grpc +package transport import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "net" "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. 
httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -51,9 +50,6 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { if err != nil { return nil, err } - if url == nil { - return nil, errDisabled - } return url, nil } @@ -76,7 +72,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -111,36 +107,38 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. -func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } - return + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) + if err != nil { + return nil, err + } + if proxyURL == nil { + // proxy is disabled if proxyURL is nil. 
+ return conn, err } + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1c1d10670..924ba4f36 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,50 +22,35 @@ package transport import ( - "bytes" "context" "errors" "fmt" "io" "net" + "strings" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} +const logLevel = 2 // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -73,10 +58,11 @@ type recvMsg struct { } // recvBuffer is an unbounded channel of recvMsg structs. -// Note recvBuffer differs from controlBuffer only in that recvBuffer -// holds a channel of only recvMsg structs instead of objects implementing "item" interface. -// recvBuffer is written to much more often than -// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { c chan recvMsg mu sync.Mutex @@ -94,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -140,45 +129,70 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. 
-func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. +func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -199,25 +213,67 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. 
What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, n) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -233,24 +289,34 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) headerChan chan struct{} // closed to indicate the end of header metadata. headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -286,7 +352,7 @@ func (s *Stream) isHeaderSent() bool { } // updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. +// if it was already set. It is valid only on server-side. func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } @@ -303,40 +369,54 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() error { +func (s *Stream) waitOnHeader() { if s.headerChan == nil { // On the server headerChan is always nil since a stream originates // only after having received headers. - return nil + return } select { case <-s.ctx.Done(): - // We prefer success over failure when reading messages because we delay - // context error in stream.Read(). To keep behavior consistent, we also - // prefer success here. 
- select { - case <-s.headerChan: - return nil - default: - } - return ContextErr(s.ctx.Err()) + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan case <-s.headerChan: - return nil } } // RecvCompress returns the compression algorithm applied to the inbound // message. It is empty string if there is no compression applied. func (s *Stream) RecvCompress() string { - if err := s.waitOnHeader(); err != nil { - return "" - } + s.waitOnHeader() return s.recvCompress } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status @@ -351,39 +431,32 @@ func (s *Stream) Done() <-chan struct{} { // available. It blocks until i) the metadata is ready or ii) there is no header // metadata or iii) the stream is canceled/expired. // -// On server side, it returns the out header after t.WriteHeader is called. +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil && s.header != nil { + if s.headerChan == nil { // On server side, return the header in stream. It will be the out // header after t.WriteHeader is called. return s.header.Copy(), nil } - err := s.waitOnHeader() - // Even if the stream is closed, header is returned if available. - select { - case <-s.headerChan: - if s.header == nil { - return nil, nil - } - return s.header.Copy(), nil - default: + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() } - return nil, err + + return s.header.Copy(), nil } // TrailersOnly blocks until a header or trailers-only frame is received and // then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. If a context error happens -// first, returns it as a status error. Client-side only. -func (s *Stream) TrailersOnly() (bool, error) { - err := s.waitOnHeader() - if err != nil { - return false, err - } - return s.noHeaders, nil +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. 
// It can be safely read only after stream has ended that is either read @@ -407,6 +480,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -419,6 +498,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -462,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil } -// tranportReader reads all the data available for this Stream from the transport and +// Read reads n bytes from the wire for this stream. +func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return nil, er + } + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil +} + +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -523,23 +659,21 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. 
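Editor's note: the hunk above switches the transport's `Stream.Read` from filling a caller-supplied `[]byte` to returning a reference-counted `mem.BufferSlice`, with a separate `ReadHeader` for the fixed-size prefix. The sketch below shows how a consumer of this style of API might drain one message. The `msgStream` interface and `readMessage` helper are hypothetical stand-ins (the real `Stream` lives in an internal package); only the 5-byte framing (compression flag plus big-endian length) comes from the gRPC wire format.

```go
package example

import (
	"encoding/binary"

	"google.golang.org/grpc/mem"
)

// msgStream is a hypothetical stand-in for the subset of the internal
// transport Stream API exercised here.
type msgStream interface {
	ReadHeader(header []byte) error
	Read(n int) (mem.BufferSlice, error)
}

// readMessage drains one length-prefixed message: a 5-byte header
// (1 compression-flag byte + 4-byte big-endian length) followed by the
// payload, which now arrives as a reference-counted mem.BufferSlice.
func readMessage(s msgStream) ([]byte, error) {
	hdr := make([]byte, 5)
	if err := s.ReadHeader(hdr); err != nil {
		return nil, err
	}
	n := int(binary.BigEndian.Uint32(hdr[1:]))

	data, err := s.Read(n)
	if err != nil {
		return nil, err
	}
	defer data.Free() // release the buffers once the payload is copied out

	return data.Materialize(), nil // flatten into a single []byte
}
```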
type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + SharedWriteBuffer bool + ChannelzParent *channelz.Server MaxHeaderListSize *uint32 -} - -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) + HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -560,8 +694,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -570,23 +704,22 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 -} - -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string + // UseProxy specifies if a proxy should be used. + UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -621,6 +754,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport @@ -629,7 +764,7 @@ type ClientTransport interface { // Close tears down this transport. 
Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -640,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -663,8 +798,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -683,7 +819,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -691,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -700,13 +836,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -716,7 +852,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, @@ -751,6 +887,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") @@ -758,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this @@ -780,30 +922,6 @@ const ( GoAwayTooManyPings GoAwayReason = 2 ) -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - // ContextErr converts the error from context package into a status error. func ContextErr(err error) error { switch err { diff --git a/vendor/google.golang.org/grpc/internal/xds/xds.go b/vendor/google.golang.org/grpc/internal/xds/xds.go new file mode 100644 index 000000000..024c388b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/xds.go @@ -0,0 +1,42 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package xds contains methods to Get/Set handshake cluster names. It is separated +// out from the top level /internal package to avoid circular dependencies. +package xds + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. 
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7..eb42b19fb 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 000000000..c37c58c02 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) 
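Editor's note: the expanded keepalive comments above spell out the client-side constraints (a `Time` below 10s is raised to 10s, and servers enforce a 5-minute `EnforcementPolicy.MinTime` by default). Below is a hedged sketch of wiring these parameters into a client; the target address is a placeholder, and `grpc.NewClient` with insecure credentials is just one way to construct the connection.

```go
package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	kp := keepalive.ClientParameters{
		// Ping after 5 minutes of inactivity; pinging more often than the
		// server's EnforcementPolicy.MinTime (5 minutes by default) risks a
		// GOAWAY for too many pings.
		Time: 5 * time.Minute,
		// Wait up to 20 seconds (the documented default) for a ping ack
		// before closing the connection.
		Timeout: 20 * time.Second,
		// Only ping while RPCs are active.
		PermitWithoutStream: false,
	}
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp),
	)
}
```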
+ + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. 
+ if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 000000000..228e9c2f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. 
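Editor's note: a small usage sketch for the new `mem.BufferPool` API above. The tier sizes and request size are arbitrary; in most cases callers can simply reuse `mem.DefaultBufferPool()` (tiers at 256B, 4KB, 16KB, 32KB and 1MB) rather than building their own tiered pool.

```go
package example

import (
	"google.golang.org/grpc/mem"
)

func pooledScratch() {
	// A custom pool with 4KB and 16KB tiers; requests larger than the biggest
	// tier fall back to a plain sync.Pool-backed implementation.
	pool := mem.NewTieredBufferPool(4<<10, 16<<10)

	buf := pool.Get(10 << 10) // served by the smallest tier that fits: 16KB
	defer pool.Put(buf)       // return the backing array when done

	copy(*buf, "scratch data") // len(*buf) is exactly the requested 10KB
}
```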
+func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. 
+ for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 000000000..4d66b2ccc --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. 
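Editor's note: to make the reference-counting contract above concrete, here is a hedged sketch tying `Buffer`, `BufferSlice`, `NewWriter`, and `Reader` together; the payload and pool choice are illustrative only.

```go
package example

import (
	"io"

	"google.golang.org/grpc/mem"
)

// copyThroughPool writes a payload into pooled buffers and reads it back,
// exercising the reference-counting rules: every holder releases its own
// references, and the backing arrays return to the pool on the last Free.
func copyThroughPool(payload []byte) ([]byte, error) {
	pool := mem.DefaultBufferPool()

	var bs mem.BufferSlice
	w := mem.NewWriter(&bs, pool) // each Write copies into a pooled Buffer
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}

	r := bs.Reader() // takes its own references; buffers are freed as read
	defer r.Close()
	defer bs.Free() // drop the writer's references

	return io.ReadAll(r)
}
```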
+func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b947..d2e15253b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -41,16 +47,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +69,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. 
// // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,14 +82,10 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -93,16 +97,24 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -111,7 +123,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -120,9 +135,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -136,21 +159,23 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. 
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -158,26 +183,69 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { + return copyOf(v) + } + } + return nil +} + +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // -// This is intended for gRPC-internal use ONLY. -func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false @@ -186,21 +254,39 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. 
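Editor's note: the metadata package now lowercases keys consistently and gains `ValueFromIncomingContext` and `MD.Delete`. A short sketch of typical call sites follows; the header names are illustrative.

```go
package example

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// bearerToken looks up a single header on the server side with the new
// case-insensitive helper.
func bearerToken(ctx context.Context) (string, bool) {
	if vals := metadata.ValueFromIncomingContext(ctx, "authorization"); len(vals) > 0 {
		return vals[0], true
	}
	return "", false
}

// withRequestID attaches a key/value pair to the outgoing metadata; the key
// is lowercased before it is stored.
func withRequestID(ctx context.Context, id string) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "X-Request-Id", id)
}
```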
Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index c9f79dc53..000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
-func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. -func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. 
- Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index f4c1c8b68..000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// -// This package is deprecated: please use package resolver instead. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ff..499a49c8c 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -22,7 +22,9 @@ package peer import ( "context" + "fmt" "net" + "strings" "google.golang.org/grpc/credentials" ) @@ -32,11 +34,41 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo } +// String ensures the Peer types implements the Stringer interface in order to +// allow to print a context with a peerKey value effectively. 
+func (p *Peer) String() string { + if p == nil { + return "Peer" + } + sb := &strings.Builder{} + sb.WriteString("Peer{") + if p.Addr != nil { + fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String()) + } else { + fmt.Fprintf(sb, "Addr: , ") + } + if p.LocalAddr != nil { + fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String()) + } else { + fmt.Fprintf(sb, "LocalAddr: , ") + } + if p.AuthInfo != nil { + fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType()) + } else { + fmt.Fprintf(sb, "AuthInfo: ") + } + sb.WriteString("}") + + return sb.String() +} + type peerKey struct{} // NewContext creates a new context with peer information attached. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 45baa2ae1..bdaa2130e 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,68 +20,68 @@ package grpc import ( "context" + "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) -// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick -// actions and unblock when there's a picker update. -type pickerWrapper struct { - mu sync.Mutex - done bool +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. blockingCh chan struct{} - picker balancer.Picker - - // The latest connection happened. - connErrMu sync.Mutex - connErr error } -func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} - return bp -} - -func (bp *pickerWrapper) updateConnectionError(err error) { - bp.connErrMu.Lock() - bp.connErr = err - bp.connErrMu.Unlock() +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] + statsHandlers []stats.Handler // to record blocking picker calls } -func (bp *pickerWrapper) connectionError() error { - bp.connErrMu.Lock() - err := bp.connErr - bp.connErrMu.Unlock() - return err +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + pw := &pickerWrapper{ + statsHandlers: statsHandlers, + } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (bp *pickerWrapper) updatePicker(p balancer.Picker) { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return - } - bp.picker = p - // bp.blockingCh should never be nil. - close(bp.blockingCh) - bp.blockingCh = make(chan struct{}) - bp.mu.Unlock() +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. 
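The peer.Peer changes above add a LocalAddr field and a String method, so a Peer carried in a context now prints something readable. Below is a minimal sketch, not part of the patch, of how server code might surface that; the logging interceptor is a hypothetical example, while peer.FromContext is the accessor this package already provides.

```
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
)

// loggingInterceptor is a hypothetical unary server interceptor that logs the
// remote peer of every call using the Peer stored in the incoming context.
func loggingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	if p, ok := peer.FromContext(ctx); ok {
		// The new String method makes %v print Addr, LocalAddr and AuthInfo.
		log.Printf("handling %s from %v", info.FullMethod, p)
	}
	return handler(ctx, req)
}

func main() {
	// Service registration and Serve are omitted; this only wires the interceptor.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}
```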
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -100,85 +100,99 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} + var lastPickErr error + for { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return nil, nil, ErrClientConnClosing + pg := pw.pickerGen.Load() + if pg == nil { + return nil, balancer.PickResult{}, ErrClientConnClosing } - - if bp.picker == nil { - ch = bp.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == bp.blockingCh { + if ch == pg.blockingCh { // This could happen when either: - // - bp.picker is nil (the previous if condition), or - // - has called pick on the current picker. - bp.mu.Unlock() + // - pw.picker is nil (the previous if condition), or + // - we have already called pick on the current picker. select { case <-ctx.Done(): - if connectionErr := bp.connectionError(); connectionErr != nil { - switch ctx.Err() { - case context.DeadlineExceeded: - return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) - case context.Canceled: - return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) - } + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } - return nil, nil, ctx.Err() case <-ch: } continue } - ch = bp.blockingCh - p := bp.picker - bp.mu.Unlock() + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. 
In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } - subConn, done, err := p.Pick(ctx, opts) + ch = pg.blockingCh + p := pg.picker + pickResult, err := p.Pick(info) if err != nil { - switch err { - case balancer.ErrNoSubConnAvailable: + if err == balancer.ErrNoSubConnAvailable { continue - case balancer.ErrTransientFailure: - if !failfast { - continue - } - return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return nil, nil, status.Error(codes.Canceled, err.Error()) - default: - if _, ok := status.FromError(err); ok { - return nil, nil, err + } + if st, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + return nil, balancer.PickResult{}, dropError{error: err} + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue } + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := subConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, done, nil + return t, pickResult, nil } - if done != nil { + if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. // DoneInfo with default value works. - done(balancer.DoneInfo{}) + pickResult.Done(balancer.DoneInfo{}) } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. @@ -186,12 +200,20 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } } -func (bp *pickerWrapper) close() { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.done { - return - } - bp.done = true - close(bp.blockingCh) +func (pw *pickerWrapper) close() { + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) +} + +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. 
+func (pw *pickerWrapper) reset() { + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go deleted file mode 100644 index ed05b02ed..000000000 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" - -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - -type pickfirstBuilder struct{} - -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} -} - -func (*pickfirstBuilder) Name() string { - return PickFirstBalancerName -} - -type pickfirstBalancer struct { - cc balancer.ClientConn - sc balancer.SubConn -} - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) - } - return - } - if b.sc == nil { - b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) - if err != nil { - //TODO(yuxuanli): why not change the cc state to Idle? 
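The picker_wrapper.go rewrite above drops the mutex-guarded picker in favor of an atomic.Pointer to a pickerGeneration, pairing each picker with a channel that is closed when a newer picker arrives, so blocked picks wake up without locking the hot read path. The following is a standalone sketch of that pattern under assumed names (generation, holder); it is not the gRPC implementation, only an illustration of the load-then-wait loop.

```
package main

import (
	"fmt"
	"sync/atomic"
)

// generation pairs a value with a channel that is closed once a newer
// generation has been published.
type generation[T any] struct {
	value   T
	expired chan struct{}
}

type holder[T any] struct {
	gen atomic.Pointer[generation[T]]
}

func newHolder[T any]() *holder[T] {
	h := &holder[T]{}
	h.gen.Store(&generation[T]{expired: make(chan struct{})})
	return h
}

// set publishes a new value and wakes everyone waiting on the previous one,
// mirroring updatePicker's Swap followed by close(old.blockingCh).
func (h *holder[T]) set(v T) {
	old := h.gen.Swap(&generation[T]{value: v, expired: make(chan struct{})})
	close(old.expired)
}

// get returns the current value plus a channel that is closed when a newer
// value lands, mirroring the load at the top of pickerWrapper.pick.
func (h *holder[T]) get() (T, <-chan struct{}) {
	g := h.gen.Load()
	return g.value, g.expired
}

func main() {
	h := newHolder[string]()
	_, expired := h.get()
	go h.set("picker-v2")
	<-expired // unblocked by set, like a blocked pick seeing a picker update
	v, _ := h.get()
	fmt.Println(v) // picker-v2
}
```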
- if grpclog.V(2) { - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - return - } - b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) - b.sc.Connect() - } else { - b.sc.UpdateAddresses(addrs) - b.sc.Connect() - } -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) - } - if b.sc != sc { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") - } - return - } - if s == connectivity.Shutdown { - b.sc = nil - return - } - - switch s { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateBalancerState(s, &picker{sc: sc}) - case connectivity.Connecting: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) - case connectivity.TransientFailure: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) - } -} - -func (b *pickfirstBalancer) Close() { -} - -type picker struct { - err error - sc balancer.SubConn -} - -func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if p.err != nil { - return nil, nil, p.err - } - return p.sc, nil, nil -} - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 76acbbcc9..e87a17f36 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,21 +20,26 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { @@ -54,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. 
+ var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 297492e87..ef3d6ed6c 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -21,437 +21,40 @@ package dns import ( - "context" - "encoding/json" - "errors" - "fmt" - "net" - "os" - "strconv" - "strings" - "sync" "time" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) -func init() { - resolver.Register(NewBuilder()) -} - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 - defaultDNSSvrPort = "53" - golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. - txtPrefix = "_grpc_config." - // In DNS, service config is encoded in a TXT record via the mechanism - // described in RFC-1464 using the attribute name grpc_config. - txtAttribute = "grpc_config=" -) - -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) - } -} - -var customAuthorityResolver = func(authority string) (netResolver, error) { - host, port, err := parseTarget(authority, defaultDNSSvrPort) - if err != nil { - return nil, err - } - - authorityWithPort := net.JoinHostPort(host, port) - - return &net.Resolver{ - PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), - }, nil +// SetResolvingTimeout sets the maximum duration for DNS resolution requests. +// +// This function affects the global timeout used by all channels using the DNS +// name resolver scheme. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +// +// The default value is 30 seconds. Setting the timeout too low may result in +// premature timeouts during resolution, while setting it too high may lead to +// unnecessary delays in service discovery. Choose a value appropriate for your +// specific needs and network environment. 
+func SetResolvingTimeout(timeout time.Duration) { + dns.ResolvingTimeout = timeout } // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. func NewBuilder() resolver.Builder { - return &dnsBuilder{minFreq: defaultFreq} -} - -type dnsBuilder struct { - // minimum frequency of polling the DNS server. - minFreq time.Duration -} - -// Build creates and starts a DNS resolver that watches the name resolution of the target. -func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) - if err != nil { - return nil, err - } - - // IP address. - if net.ParseIP(host) != nil { - host, _ = formatIP(host) - addr := []resolver.Address{{Addr: host + ":" + port}} - i := &ipResolver{ - cc: cc, - ip: addr, - rn: make(chan struct{}, 1), - q: make(chan struct{}), - } - cc.NewAddress(addr) - go i.watcher() - return i, nil - } - - // DNS address (non-IP). - ctx, cancel := context.WithCancel(context.Background()) - d := &dnsResolver{ - freq: b.minFreq, - backoff: backoff.Exponential{MaxDelay: b.minFreq}, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - t: time.NewTimer(0), - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, - } - - if target.Authority == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.Authority) - if err != nil { - return nil, err - } - } - - d.wg.Add(1) - go d.watcher() - return d, nil -} - -// Scheme returns the naming scheme of this resolver builder, which is "dns". -func (b *dnsBuilder) Scheme() string { - return "dns" -} - -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - -// ipResolver watches for the name resolution update for an IP address. -type ipResolver struct { - cc resolver.ClientConn - ip []resolver.Address - // rn channel is used by ResolveNow() to force an immediate resolution of the target. - rn chan struct{} - q chan struct{} -} - -// ResolveNow resend the address it stores, no resolution is needed. -func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case i.rn <- struct{}{}: - default: - } -} - -// Close closes the ipResolver. -func (i *ipResolver) Close() { - close(i.q) -} - -func (i *ipResolver) watcher() { - for { - select { - case <-i.rn: - i.cc.NewAddress(i.ip) - case <-i.q: - return - } - } -} - -// dnsResolver watches for the name resolution update for a non-IP target. -type dnsResolver struct { - freq time.Duration - backoff backoff.Exponential - retryCount int - host string - port string - resolver netResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. - rn chan struct{} - t *time.Timer - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. 
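SetResolvingTimeout, added above, is a process-wide knob and must be called before any channel is created. A minimal sketch of the intended call site follows; the target string and the insecure credentials are placeholders chosen for brevity.

```
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver/dns"
)

func main() {
	// Applies globally to every channel using the "dns" scheme; call it once
	// at startup, before the first grpc.NewClient/grpc.Dial.
	dns.SetResolvingTimeout(10 * time.Second)

	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```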
- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool -} - -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. -func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case d.rn <- struct{}{}: - default: - } -} - -// Close closes the dnsResolver. -func (d *dnsResolver) Close() { - d.cancel() - d.wg.Wait() - d.t.Stop() -} - -func (d *dnsResolver) watcher() { - defer d.wg.Done() - for { - select { - case <-d.ctx.Done(): - return - case <-d.t.C: - case <-d.rn: - if !d.t.Stop() { - // Before resetting a timer, it should be stopped to prevent racing with - // reads on it's channel. - <-d.t.C - } - } - - result, sc := d.lookup() - // Next lookup should happen within an interval defined by d.freq. It may be - // more often due to exponential retry on empty address list. - if len(result) == 0 { - d.retryCount++ - d.t.Reset(d.backoff.Backoff(d.retryCount)) - } else { - d.retryCount = 0 - d.t.Reset(d.freq) - } - d.cc.NewServiceConfig(sc) - d.cc.NewAddress(result) - - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) - select { - case <-t.C: - case <-d.ctx.Done(): - t.Stop() - return - } - } -} - -func (d *dnsResolver) lookupSRV() []resolver.Address { - var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) - if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) - } - } - return newAddrs -} - -func (d *dnsResolver) lookupTXT() string { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) - if err != nil { - grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) - return "" - } - var res string - for _, s := range ss { - res += s - } - - // TXT record must have "grpc_config=" attribute in order to be used as service config. - if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) - return "" - } - return strings.TrimPrefix(res, txtAttribute) -} - -func (d *dnsResolver) lookupHost() []resolver.Address { - var newAddrs []resolver.Address - addrs, err := d.resolver.LookupHost(d.ctx, d.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + d.port - newAddrs = append(newAddrs, resolver.Address{Addr: addr}) - } - return newAddrs -} - -func (d *dnsResolver) lookup() ([]resolver.Address, string) { - newAddrs := d.lookupSRV() - // Support fallback to non-balancer address. - newAddrs = append(newAddrs, d.lookupHost()...) 
- if d.disableServiceConfig { - return newAddrs, "" - } - sc := d.lookupTXT() - return newAddrs, canaryingSC(sc) -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" -// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" -func parseTarget(target, defaultPort string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err = net.SplitHostPort(target); err == nil { - if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon - } - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
- host = "localhost" - } - return host, port, nil - } - if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) -} - -type rawChoice struct { - ClientLanguage *[]string `json:"clientLanguage,omitempty"` - Percentage *int `json:"percentage,omitempty"` - ClientHostName *[]string `json:"clientHostName,omitempty"` - ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` -} - -func containsString(a *[]string, b string) bool { - if a == nil { - return true - } - for _, c := range *a { - if c == b { - return true - } - } - return false -} - -func chosenByPercentage(a *int) bool { - if a == nil { - return true - } - return grpcrand.Intn(100)+1 <= *a + return dns.NewBuilder() } -func canaryingSC(js string) string { - if js == "" { - return "" - } - var rcs []rawChoice - err := json.Unmarshal([]byte(js), &rcs) - if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) - return "" - } - cliHostname, err := os.Hostname() - if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) - return "" - } - var sc string - for _, c := range rcs { - if !containsString(c.ClientLanguage, golang) || - !chosenByPercentage(c.Percentage) || - !containsString(c.ClientHostName, cliHostname) || - c.ServiceConfig == nil { - continue - } - sc = string(*c.ServiceConfig) - break - } - return sc +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d } diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go new file mode 100644 index 000000000..09e864a89 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -0,0 +1,128 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package manual defines a resolver that can be used to manually send resolved +// addresses to ClientConn. +package manual + +import ( + "sync" + + "google.golang.org/grpc/resolver" +) + +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. 
+func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, + UpdateStateCallback: func(error) {}, + ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, + scheme: scheme, + } +} + +// Resolver is also a resolver builder. +// It's build() function always returns itself. +type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) + // UpdateStateCallback is called when the UpdateState method is called on + // the resolver. The value passed as argument to this callback is the value + // returned by the resolver.ClientConn. Must not be nil. Must not be + // changed after the resolver may be built. + UpdateStateCallback func(err error) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. + ResolveNowCallback func(resolver.ResolveNowOptions) + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string + + // Fields actually belong to the resolver. + // Guards access to below fields. + mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State +} + +// InitialState adds initial state to the resolver so that UpdateState doesn't +// need to be explicitly called after Dial. +func (r *Resolver) InitialState(s resolver.State) { + r.lastSeenState = &s +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.mu.Lock() + defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) + r.CC = cc + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) + } + return r, nil +} + +// Scheme returns the manual resolver's scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ResolveNowCallback(o) +} + +// Close is a noop for Resolver. +func (r *Resolver) Close() { + r.CloseCallback() +} + +// UpdateState calls CC.UpdateState. +func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.CC == nil { + panic("cannot update state as grpc.Dial with resolver has not been called") + } + err = r.CC.UpdateState(s) + r.lastSeenState = &s + r.UpdateStateCallback(err) +} + +// ReportError calls CC.ReportError. 
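A usage sketch for the manual resolver added above, with placeholder addresses: the builder is handed to the channel via grpc.WithResolvers, and InitialState seeds the address list so nothing has to race the channel's first resolution.

```
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed the resolver so the channel has endpoints as soon as it is built.
	r.InitialState(resolver.State{
		Endpoints: []resolver.Endpoint{
			{Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}}},
			{Addresses: []resolver.Address{{Addr: "10.0.0.2:50051"}}},
		},
	})

	conn, err := grpc.NewClient("example:///ignored",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```

Subsequent membership changes go through r.UpdateState, but only after the channel has actually built the resolver; calling it earlier panics, as the UpdateState body above shows.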
+func (r *Resolver) ReportError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.CC == nil { + panic("cannot report error as grpc.Dial with resolver has not been called") + } + r.CC.ReportError(err) +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..ada5b9bb7 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value any +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. 
+func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. 
+func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e83da346a..202854511 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -21,6 +21,15 @@ package resolver import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -33,8 +42,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -54,62 +64,157 @@ func Get(scheme string) Builder { } // SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". +// scheme is initially set to "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme + internal.UserSetDefaultScheme = true } -// GetDefaultScheme gets the default scheme that will be used. +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - GRPCLB -) - // Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
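A small sketch exercising the AddressMap and EndpointMap added in map.go above, with placeholder addresses. It shows the two keying rules: AddressMap distinguishes entries by Addr, ServerName and Attributes, while EndpointMap keys on the unordered set of Addr strings, so address order inside an endpoint does not affect lookups.

```
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	am := resolver.NewAddressMap()
	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-1"}
	am.Set(a, "ready")
	if v, ok := am.Get(a); ok {
		fmt.Println("address value:", v) // address value: ready
	}

	em := resolver.NewEndpointMap()
	ep := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:443"},
		{Addr: "10.0.0.2:443"},
	}}
	em.Set(ep, 42)

	// The same addresses in a different order resolve to the same entry.
	reordered := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.2:443"},
		{Addr: "10.0.0.1:443"},
	}}
	if v, ok := em.Get(reordered); ok {
		fmt.Println("endpoint value:", v) // endpoint value: 42
	}
}
```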
type Address struct { // Addr is the server address on which a connection will be established. Addr string - // Type is the type of this address. - Type AddressType + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. // - // e.g. if Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the SubConn. + Attributes *attributes.Attributes + + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + // + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes + // Metadata is the information associated with Addr, which may be used // to make load balancing decision. - Metadata interface{} + // + // Deprecated: use Attributes instead. + Metadata any +} + +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Metadata == o.Metadata } -// BuildOption includes additional information for the builder to create +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + +// BuildOptions includes additional information for the builder to create // the resolver. -type BuildOption struct { - // DisableServiceConfig indicates whether resolver should fetch service config data. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. 
+ DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string +} + +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes } // State contains the current Resolver state relevant to the ClientConn. type State struct { - Addresses []Address // Resolved addresses for the target - // ServiceConfig is the parsed service config; obtained from - // serviceconfig.Parse. - ServiceConfig serviceconfig.Config + // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address - // TODO: add Err error + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes } // ClientConn contains the callbacks for resolver to notify any updates @@ -121,41 +226,70 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. 
+ UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult } // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. -// -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. -// -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. type Target struct { - Scheme string - Authority string - Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". 
Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// String returns the canonical string representation of Target. +func (t Target) String() string { + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() } // Builder creates a resolver that will be used to watch name resolution updates. @@ -164,14 +298,16 @@ type Builder interface { // // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } -// ResolveNowOption includes additional information for ResolveNow. -type ResolveNowOption struct{} +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} // Resolver watches for the updates on the specified target. // Updates include address updates and service config updates. @@ -180,14 +316,17 @@ type Resolver interface { // again. It's just a hint, resolver can ignore this if it's not necessary. // // It could be called multiple times concurrently. - ResolveNow(ResolveNowOption) + ResolveNow(ResolveNowOptions) // Close closes the resolver. Close() } -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index 6934905b0..000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,168 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
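For readers skimming the resolver API changes above: `UpdateState` now returns an error, `State` can carry `Endpoints` alongside the legacy `Addresses`, `Builder` takes `BuildOptions`, and builders may implement `AuthorityOverrider`. A minimal sketch of a custom resolver written against the new package follows; the "example" scheme, the package name, and the hard-coded addresses are illustrative assumptions, not anything this patch registers.

```
package exampleresolver

import "google.golang.org/grpc/resolver"

// builder registers a hypothetical "example" scheme that always reports the
// same two backends. Scheme must be lowercase to match parsed targets.
type builder struct{}

func (builder) Scheme() string { return "example" }

func (builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	r.update()
	return r, nil
}

type staticResolver struct {
	cc resolver.ClientConn
}

func (r *staticResolver) update() {
	state := resolver.State{
		// Endpoints is the newer shape; gRPC still synthesizes Endpoints from
		// Addresses for resolvers that only set the legacy field.
		Endpoints: []resolver.Endpoint{
			{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
		},
	}
	if err := r.cc.UpdateState(state); err != nil {
		// UpdateState now returns an error; a real resolver should back off
		// and re-resolve rather than ignore it.
		return
	}
}

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) { r.update() }
func (r *staticResolver) Close()                                {}

func init() { resolver.Register(builder{}) }
```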
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "strings" - "sync/atomic" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/resolver" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. -type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done uint32 // accessed atomically; set to 1 when closed. - curState resolver.State -} - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - -// newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme and builds the resolver. The monitoring goroutine -// for it is not started yet and can be created by calling start(). -// -// If withResolverBuilder dial option is set, the specified resolver will be -// used instead. -func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) - } - - ccr := &ccResolverWrapper{ - cc: cc, - addrCh: make(chan []resolver.Address, 1), - scCh: make(chan string, 1), - } - - var err error - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) - if err != nil { - return nil, err - } - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.resolver.Close() - atomic.StoreUint32(&ccr.done, 1) -} - -func (ccr *ccResolverWrapper) isDone() bool { - return atomic.LoadUint32(&ccr.done) == 1 -} - -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.isDone() { - return - } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } - ccr.cc.updateResolverState(s) - ccr.curState = s -} - -// NewAddress is called by the resolver implementation to send addresses to gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.isDone() { - return - } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.isDone() { - return - } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - c, err := parseServiceConfig(sc) - if err != nil { - return - } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) - } - ccr.curState.ServiceConfig = c - ccr.cc.updateResolverState(ccr.curState) -} - -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) - newSC, newOK := s.ServiceConfig.(*ServiceConfig) - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtINFO, - }) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..23bb3fb25 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. 
+func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.TrySchedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. 
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 088c3f1b2..2d96f1405 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,15 +19,12 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" "fmt" "io" - "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -37,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -77,8 +75,8 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -144,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -155,13 +153,13 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -180,7 +178,7 @@ type CallOption interface { // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. 
- after(*callInfo) + after(*callInfo, *csAttempt) } // EmptyCallOption does not alter the Call configuration. @@ -188,8 +186,22 @@ type CallOption interface { // by interceptors. type EmptyCallOption struct{} -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// StaticMethod returns a CallOption which specifies that a call is being made +// to a method that is static, which means the method is known at compile time +// and doesn't change at runtime. This can be used as a signal to stats plugins +// that this method is safe to include as a key to a measurement. +func StaticMethod() CallOption { + return StaticMethodCallOption{} +} + +// StaticMethodCallOption is a CallOption that specifies that a call comes +// from a static method. +type StaticMethodCallOption struct { + EmptyCallOption +} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. @@ -199,16 +211,18 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -219,16 +233,18 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -239,30 +255,29 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -276,7 +291,11 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type FailFastCallOption struct { FailFast bool } @@ -285,16 +304,57 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. -// This is an EXPERIMENTAL API. +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int } @@ -303,16 +363,22 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. -// This is an EXPERIMENTAL API. +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int } @@ -321,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -331,7 +397,11 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PerRPCCredsCallOption struct { Creds credentials.PerRPCCredentials } @@ -340,19 +410,26 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func UseCompressor(name string) CallOption { return CompressorCallOption{CompressorType: name} } // CompressorCallOption is a CallOption that indicates the compressor to use. -// This is an EXPERIMENTAL API. 
+// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CompressorCallOption struct { CompressorType string } @@ -361,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -385,7 +462,11 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ContentSubtypeCallOption struct { ContentSubtype string } @@ -394,11 +475,12 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -409,7 +491,10 @@ func (o ContentSubtypeCallOption) after(c *callInfo) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ForceCodec(codec encoding.Codec) CallOption { return ForceCodecCallOption{Codec: codec} } @@ -417,16 +502,59 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ForceCodecCallOption struct { Codec encoding.Codec } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) + return nil +} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
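The CallOption surface documented above (WaitForReady, Header, OnFinish, the byte-denominated MaxCallRecvMsgSize, UseCompressor) is easiest to read from the call site. A short client-side sketch follows; the health-check stub is only a stand-in for whatever generated client a caller actually has, and the timeout and size limit are arbitrary.

```
package example

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/metadata"
)

// check issues one RPC using several of the options described above.
func check(ctx context.Context, conn *grpc.ClientConn) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	var hdr metadata.MD
	client := healthpb.NewHealthClient(conn)
	_, err := client.Check(ctx, &healthpb.HealthCheckRequest{},
		grpc.WaitForReady(true),        // wait out TRANSIENT_FAILURE instead of failing fast
		grpc.MaxCallRecvMsgSize(1<<20), // per-call 1 MiB receive limit (bytes, not messages)
		grpc.Header(&hdr),              // populated only after the RPC completes
		grpc.OnFinish(func(err error) { // experimental: invoked once with the final status
			log.Printf("health check finished: %v", err)
		}),
	)
	if err != nil {
		log.Printf("health check failed: %v", err)
	}
}
```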
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 return nil } -func (o ForceCodecCallOption) after(c *callInfo) {} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -439,28 +567,38 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CustomCodecCallOption struct { Codec Codec } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func MaxRetryRPCBufferSize(bytes int) CallOption { return MaxRetryRPCBufferSizeCallOption{bytes} } // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRetryRPCBufferSizeCallOption struct { MaxRetryRPCBufferSize int } @@ -469,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -479,16 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -497,19 +647,21 @@ type parser struct { // format. The caller owns the returned msg memory. 
// // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -521,22 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -544,41 +695,53 @@ func encode(c baseCodec, msg interface{}) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -589,32 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
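The msgHeader/outPayload rework above keeps the wire format itself unchanged: every gRPC message is still prefixed by one compressed-flag byte followed by a big-endian uint32 payload length. A standalone sketch of that 5-byte prefix, using names of our own choosing rather than gRPC's internals:

```
package example

import "encoding/binary"

// framePrefix builds the 5-byte gRPC message prefix: one compressed-flag byte,
// then the payload length as a big-endian uint32.
func framePrefix(payloadLen uint32, compressed bool) [5]byte {
	var hdr [5]byte
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], payloadLen)
	return hdr
}
```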
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, + SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -622,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -631,66 +802,130 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. - uncompressedBytes []byte + compressedLength int // The compressed length got from wire. + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } - if payInfo != nil { - payInfo.wireLength = len(d) - } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() return nil, st.Err() } - if pf == compressionMade { + var size int + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} } + size = len(uncompressedBuf) } else { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } + if size > maxReceiveMessageSize { + out.Free() + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } else { + out = compressed } - if len(d) > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + + if payInfo != nil { + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out + } + + return out, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, 0, err + } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? 
+ //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - return d, nil + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) - } - if payInfo != nil { - payInfo.uncompressedBytes = d + + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -749,115 +984,86 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return errContextDeadline + case context.Canceled: + return errContextCanceled + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err } + return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.CodecV2); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. - c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. 
The latest -// support package version is 5. +// support package version is 9. // -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. +// Older versions are kept for compatibility. // // These constants should not be referenced from any other code. const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f064b73e5..d1e1415a4 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -33,18 +33,19 @@ import ( "sync/atomic" "time" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -55,11 +56,38 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption + internal.BufferPool = bufferPool +} + var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -72,41 +100,48 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. 
- HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata any } // Server is a gRPC server to serve RPC requests. type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - m map[string]*service // service name -> service info - events trace.EventLog + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events traceEventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID int64 // channelz unique identification number - czData *channelzData + channelz *channelz.Server + + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -116,8 +151,11 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -128,17 +166,25 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 + bufferPool mem.BufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + bufferPool: mem.DefaultBufferPool(), } +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -148,7 +194,10 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// This API is EXPERIMENTAL. 
+// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyServerOption struct{} func (EmptyServerOption) apply(*serverOptions) {} @@ -169,22 +218,50 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. -// Note: A Send call may not directly translate to a write. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -209,9 +286,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { - grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -229,9 +306,59 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. 
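For context on the options touched in this hunk, a minimal sketch of a server wired with the write/read buffer knobs, the experimental SharedWriteBuffer option, and keepalive parameters; the sizes and durations are illustrative placeholders, not recommendations:

```
package exampleserver

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// NewTunedServer shows the buffer and keepalive ServerOptions together.
// Per the KeepaliveParams hunk above, a Time below the server minimum ping
// interval is clamped to that minimum.
func NewTunedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.WriteBufferSize(64*1024), // zero or negative disables the write buffer
		grpc.ReadBufferSize(64*1024),  // zero or negative disables the read buffer
		grpc.SharedWriteBuffer(true),  // experimental: release the buffer after each flush
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    2 * time.Hour,
			Timeout: 20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute,
			PermitWithoutStream: false,
		}),
	)
}
```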
// // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -241,7 +368,8 @@ func CustomCodec(codec Codec) ServerOption { // default, server messages will be sent using the same compressor with which // request messages were sent. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCCompressor(cp Compressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.cp = cp @@ -252,7 +380,8 @@ func RPCCompressor(cp Compressor) ServerOption { // messages. It has higher priority than decompressors registered via // encoding.RegisterCompressor. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func RPCDecompressor(dc Decompressor) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.dc = dc @@ -262,7 +391,7 @@ func RPCDecompressor(dc Decompressor) ServerOption { // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default limit. // -// Deprecated: use MaxRecvMsgSize instead. +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. 
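A sketch of the ForceServerCodec replacement for the deprecated CustomCodec shown here: a wrapper codec that delegates to the registered proto codec, with a hypothetical size-recording hook standing in for real instrumentation:

```
package examplecodec

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the default "proto" codec
)

// countingCodec wraps another encoding.Codec; recordSize is a stand-in hook.
type countingCodec struct {
	base       encoding.Codec
	recordSize func(n int)
}

func (c countingCodec) Marshal(v any) ([]byte, error) {
	b, err := c.base.Marshal(v)
	if err == nil {
		c.recordSize(len(b))
	}
	return b, err
}

func (c countingCodec) Unmarshal(data []byte, v any) error {
	c.recordSize(len(data))
	return c.base.Unmarshal(data, v)
}

func (c countingCodec) Name() string { return c.base.Name() }

// NewServerWithCodec installs the wrapper; ForceServerCodec overrides
// content-subtype lookups, as the doc comment above notes.
func NewServerWithCodec() (*grpc.Server, error) {
	base := encoding.GetCodec("proto")
	if base == nil {
		return nil, fmt.Errorf("proto codec not registered")
	}
	codec := countingCodec{base: base, recordSize: func(int) {}}
	return grpc.NewServer(grpc.ForceServerCodec(codec)), nil
}
```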
func MaxMsgSize(m int) ServerOption { return MaxRecvMsgSize(m) } @@ -286,6 +415,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -310,6 +442,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -321,8 +463,23 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -335,7 +492,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -343,8 +514,8 @@ func StatsHandler(h stats.Handler) ServerOption { // unknown service handler. The provided method is a bidi-streaming RPC service // handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. -// The handling function has full access to the Context of the request and the -// stream, and the invocation bypasses interceptors. 
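The new ChainUnaryInterceptor and ChainStreamInterceptor options in this hunk are typically wired up as in the sketch below; the logging and timing interceptors are hypothetical stand-ins:

```
package exampleinterceptors

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnary runs first because it is listed first: the first interceptor
// in the chain is the outermost wrapper, per the doc comment above.
func loggingUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func timingUnary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %s", info.FullMethod, time.Since(start))
	return resp, err
}

func loggingStream(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	log.Printf("stream call: %s", info.FullMethod)
	return handler(srv, ss)
}

func NewServer() *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(loggingUnary, timingUnary),
		grpc.ChainStreamInterceptor(loggingStream),
	)
}
```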
+// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.unknownStreamDesc = &StreamDesc{ @@ -362,52 +533,161 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ConnectionTimeout(d time.Duration) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.connectionTimeout = d }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. func MaxHeaderListSize(s uint32) ServerOption { + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. return newFuncServerOption(func(o *serverOptions) { - o.maxHeaderListSize = &s + o.numServerWorkers = numServerWorkers }) } +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +func bufferPool(bufferPool mem.BufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.bufferPool = bufferPool + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 
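A sketch combining the experimental NumStreamWorkers, WaitForHandlers, and HeaderTableSize options documented in this hunk with the stable MaxHeaderListSize and ConnectionTimeout ones; the values are illustrative only:

```
package exampleoptions

import (
	"runtime"
	"time"

	"google.golang.org/grpc"
)

func NewServerWithWorkerPool() *grpc.Server {
	return grpc.NewServer(
		// Experimental: reuse a fixed pool of goroutines for stream handling
		// instead of spawning one per stream; zero keeps the default behavior.
		grpc.NumStreamWorkers(uint32(runtime.NumCPU())),
		// Experimental: make Stop wait for in-flight method handlers to return.
		grpc.WaitForHandlers(true),
		// Cap the uncompressed header list size and the HPACK dynamic table size.
		grpc.MaxHeaderListSize(1<<20),
		grpc.HeaderTableSize(4096),
		// Handshake deadline for new connections (default 120s per the hunk above).
		grpc.ConnectionTimeout(30*time.Second),
	)
}
```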
2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorker blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel + if !ok { + return + } + f() + } + go s.serverWorker() +} + +// initServerWorkers creates worker goroutines and a channel to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[transport.ServerTransport]bool), - m: make(map[string]*service), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - czData: new(channelzData), - } + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() } + + channelz.Info(logger, s.channelz, "Server created") return s } // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -415,49 +695,64 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } } +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. 
+ // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl any) +} + // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before -// invoking Serve. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } } s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] - srv.md[d.MethodName] = d + info.methods[d.MethodName] = d } for i := range sd.Streams { d := &sd.Streams[i] - srv.sd[d.StreamName] = d + info.streams[d.StreamName] = d } - s.m[sd.ServiceName] = srv + s.services[sd.ServiceName] = info } // MethodInfo contains the information of an RPC including its method name and type. @@ -474,23 +769,23 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. // Service names include the package names, in the form of .. func (s *Server) GetServiceInfo() map[string]ServiceInfo { ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { methods = append(methods, MethodInfo{ Name: m, IsClientStream: false, IsServerStream: false, }) } - for m, d := range srv.sd { + for m, d := range srv.streams { methods = append(methods, MethodInfo{ Name: m, IsClientStream: d.ClientStreams, @@ -510,30 +805,15 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
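The new ServiceRegistrar interface lets registration helpers accept any registrar rather than a concrete *grpc.Server. A sketch with a hand-written, hypothetical ServiceDesc (generated code normally produces this) and a GetServiceInfo dump:

```
package exampleregistry

import (
	"fmt"

	"google.golang.org/grpc"
)

// echoServiceDesc is a hypothetical descriptor with one server-streaming
// method. HandlerType is left nil; it is only inspected when a non-nil
// implementation is passed to RegisterService, per the hunk above.
var echoServiceDesc = grpc.ServiceDesc{
	ServiceName: "example.Echo",
	Streams: []grpc.StreamDesc{{
		StreamName: "Watch",
		Handler: func(srv any, stream grpc.ServerStream) error {
			return nil // hypothetical no-op handler
		},
		ServerStreams: true,
	}},
}

// RegisterEchoService mirrors what generated code does: it takes the
// ServiceRegistrar interface, so it also works with non-*grpc.Server
// registrars such as test fakes.
func RegisterEchoService(r grpc.ServiceRegistrar) {
	r.RegisterService(&echoServiceDesc, nil)
}

// DumpServices lists registered methods via GetServiceInfo.
func DumpServices(s *grpc.Server) {
	for name, info := range s.GetServiceInfo() {
		for _, m := range info.Methods {
			fmt.Printf("%s/%s (client_stream=%v server_stream=%v)\n",
				name, m.Name, m.IsClientStream, m.IsServerStream)
		}
	}
}
```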
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } + channelz *channelz.Socket } func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") return err } @@ -543,6 +823,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -563,13 +855,17 @@ func (s *Server) Serve(lis net.Listener) error { } }() - ls := &listenSocket{Listener: lis} - s.lis[ls] = true - - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), } - s.mu.Unlock() + s.lis[ls] = true defer func() { s.mu.Lock() @@ -580,8 +876,10 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + s.mu.Unlock() + channelz.Info(logger, ls.channelz, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -625,7 +923,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -633,90 +931,114 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. 
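The Serve doc comment above describes how to fall back to OS defaults for TCP keepalive. A partial sketch of that recipe; the platform-specific Accept wrapper that re-enables SO_KEEPALIVE is deliberately omitted:

```
package examplelisten

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// ServeWithOSKeepalive covers the first half of the recommendation: a negative
// KeepAlive stops the Go runtime from overriding OS keepalive defaults. Note
// that this also leaves TCP keepalives disabled until the second step (an
// Accept wrapper setting SO_KEEPALIVE per connection) is added.
func ServeWithOSKeepalive(ctx context.Context, s *grpc.Server, addr string) error {
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(ctx, "tcp", addr)
	if err != nil {
		return err
	}
	return s.Serve(lis)
}
```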
-func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) - s.removeConn(st) + s.serveStreams(context.Background(), st, rawConn) + s.removeConn(lisAddr, st) }() } // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). -func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, - ChannelzParentID: s.channelzID, + SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
+ if err != io.EOF { + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { + select { + case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } + go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -733,168 +1055,231 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL -// and subject to change. +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) - s.serveStreams(st) + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(r.Context(), st, nil) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
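The routing snippet from the ServeHTTP doc comment, expanded into a reusable handler; TLS/HTTP2 configuration on the enclosing http.Server is assumed and not shown:

```
package examplesharedport

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

// GRPCAndHTTPHandler sends HTTP/2 requests with a gRPC content type to the
// gRPC server and everything else to mux, following the ServeHTTP doc
// comment above.
func GRPCAndHTTPHandler(grpcServer *grpc.Server, mux http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		mux.ServeHTTP(w, r)
	})
}
```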
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) - s.cv.Broadcast() - } -} -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. 
+ delete(s.conns, addr) + } + s.cv.Broadcast() } } func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.channelz.ServerMetrics.CallsSucceeded.Add(1) } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + data.Free() + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } + } } return err } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
} - sh := s.opts.statsHandler - if sh != nil { - beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() } - sh.HandleRPC(stream.Context(), begin) + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(ctx, end) } - sh.HandleRPC(stream.Context(), end) - }() - } - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { - ctx := stream.Context() + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -913,7 +1298,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -923,6 +1310,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -943,82 +1331,100 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { - sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength, - Data: d, - Length: len(d), + + for _, sh := range shs { + sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: d.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ - Message: d, - }) + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d.Materialize(), + } + for _, binlog := range binlogs { + binlog.Log(ctx, cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. 
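The negotiation above only picks compressors that are registered. A sketch of registering gzip via its side-effect import, plus the experimental SetSendCompressor override that this diff references for handlers:

```
package examplecompress

import (
	"context"

	"google.golang.org/grpc"

	// Registering gzip lets the server respond with the same encoding when a
	// client sends gzip-compressed requests, instead of falling back to identity.
	_ "google.golang.org/grpc/encoding/gzip"
)

// forceGzipResponse sketches a handler-level override of the negotiated
// response compressor; it must be called before the first response message.
func forceGzipResponse(ctx context.Context) error {
	return grpc.SetSendCompressor(ctx, "gzip")
}
```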
- binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } } return appErr } @@ -1027,14 +1433,19 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1044,26 +1455,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1074,60 +1493,130 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } } - return err + return t.WriteStatus(stream, statusOK) +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() } - sh := s.opts.statsHandler - if sh != nil { + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(ctx, statsBegin) } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(stream.Context(), end) - }() } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(ctx, end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1146,7 +1635,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(ctx, logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1168,32 +1659,30 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc } } + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() } var appErr error - var server interface{} - if srv != nil { - server = srv.server + var server any + if info != nil { + server = info.serviceImpl } if s.opts.streamInt == nil { appErr = sd.Handler(server, ss) @@ -1208,7 +1697,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1217,13 +1708,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1232,57 +1726,93 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } } - return err + return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = newTraceContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] - srv, knownService := s.m[service] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. 
+ stream.SetContext(ctx) + + srv, knownService := s.services[service] if knownService { - if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1291,19 +1821,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1313,7 +1843,10 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { return context.WithValue(ctx, streamKey{}, stream) } @@ -1325,7 +1858,10 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ServerTransportStream interface { Method() string SetHeader(md metadata.MD) error @@ -1337,7 +1873,10 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { s, _ := ctx.Value(streamKey{}).(ServerTransportStream) return s @@ -1349,87 +1888,87 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() - - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() - - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) - - s.mu.Lock() - listeners := s.lis - s.lis = nil - st := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. 
- s.cv.Broadcast() - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for c := range st { - c.Close() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() + s.stop(false) } // GracefulStop stops the gRPC server gracefully. It stops the server from // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return - } - - for lis := range s.lis { - lis.Close() - } - s.lis = nil - if !s.drain { - for st := range s.conns { - st.Drain() - } - s.drain = true - } - + s.closeListenersLocked() // Wait for serving threads to be ready to exit. Only then can we be sure no // new conns will be created. s.mu.Unlock() s.serveWG.Wait() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() + } for len(s.conns) != 0 { s.cv.Wait() } s.conns = nil + + if s.opts.numServerWorkers > 0 { + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() +} + +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } + } +} + +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } + } + s.drain = true + } +} + +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() + } + s.lis = nil } // contentSubtype must be lowercase @@ -1439,21 +1978,74 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { - return encoding.GetCodec(proto.Name) + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) + return getCodec(proto.Name) } return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. 
+func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1465,8 +2057,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1478,8 +2076,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. 
+// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return stream.ClientAdvertisedCompressors(), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1501,10 +2157,52 @@ func Method(ctx context.Context) (string, bool) { return s.Method(), true } -type channelzServer struct { - s *Server +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name string, clientCompressors []string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range clientCompressors { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. 
Unblock it. + q.wait <- struct{}{} + } } -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 686ad7ba6..2671c5ef6 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -20,15 +20,17 @@ package grpc import ( "encoding/json" + "errors" "fmt" - "strconv" - "strings" + "reflect" "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -40,34 +42,7 @@ const maxInt = int(^uint(0) >> 1) // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. - retryPolicy *retryPolicy -} - -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} +type MethodConfig = internalserviceconfig.MethodConfig // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. @@ -78,15 +53,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancer will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. - LB *string - // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig + lbConfig serviceconfig.LoadBalancingConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. 
/service/method) in the map, use the @@ -126,38 +95,10 @@ type healthCheckConfig struct { ServiceName string } -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at - // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -179,167 +120,110 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { - Service *string - Method *string + Service string + Method string } -func (j jsonName) generatePath() (string, bool) { - if j.Service == nil { - return "", false +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil } - res := "/" + *j.Service + "/" - if j.Method != nil { - res += *j.Method + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method } - return res, true + return res, nil } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy } -type loadBalancingConfig map[string]json.RawMessage - // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *[]loadBalancingConfig + LoadBalancingConfig *json.RawMessage MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } func init() { - internal.ParseServiceConfig = func(sc string) (interface{}, error) { - return parseServiceConfig(sc) + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) } } - -func parseServiceConfig(js string) (*ServiceConfig, error) { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { - return nil, fmt.Errorf("no JSON service config provided") + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if rsc.LoadBalancingConfig != nil { - for i, lbcfg := range *rsc.LoadBalancingConfig { - if len(lbcfg) != 1 { - err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - grpclog.Warningf(err.Error()) - return nil, err - } - var name string - var jsonCfg json.RawMessage - for name, jsonCfg = range lbcfg { - } - builder := balancer.Get(name) - if builder == nil { - continue - } - sc.lbConfig = &lbConfig{name: name} - if parser, ok := builder.(balancer.ConfigParser); ok { - var err error - sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) - if err != nil { - return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) - } - } else if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - break + c := rsc.LoadBalancingConfig + if c == nil { + name := pickfirst.Name + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy } - if sc.lbConfig == nil { - // We had a loadBalancingConfig field but did not encounter a - // supported policy. The config is considered invalid in this - // case. 
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") - grpclog.Warningf(err.Error()) - return nil, err + if balancer.Get(name) == nil { + name = pickfirst.Name } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} + } + r := json.RawMessage(strCfg) + c = &r + } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} } + sc.lbConfig = cfg if rsc.MethodConfig == nil { - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } + + paths := map[string]struct{}{} for _, m := range *rsc.MethodConfig { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { @@ -355,59 +239,60 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) } } - for _, n := range *m.Name { - if path, valid := n.generatePath(); valid { - sc.Methods[path] = mc + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc } } if sc.retryThrottling != nil { if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { - return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} } if tr := sc.retryThrottling.TokenRatio; tr <= 0 { - return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} } } - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: 
ignoring retry policy %v due to illegal configuration", jrp) + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts } - if rp.maxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. - rp.maxAttempts = 5 + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: maxAttempts, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), } for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true + rp.RetryableStatusCodes[code] = true } return rp, nil } @@ -435,3 +320,37 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { func newInt(b int) *int { return &b } + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 53b27875a..35e7a20a0 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,30 +19,26 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// This package is EXPERIMENTAL. +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package serviceconfig -import ( - "google.golang.org/grpc/internal" -) - // Config represents an opaque data structure holding a service config. type Config interface { - isConfig() + isServiceConfig() } // LoadBalancingConfig represents an opaque data structure holding a load -// balancer config. +// balancing config. type LoadBalancingConfig interface { isLoadBalancingConfig() } -// Parse parses the JSON service config provided into an internal form or -// returns an error if the config is invalid. -func Parse(ServiceConfigJSON string) (Config, error) { - c, err := internal.ParseServiceConfig(ServiceConfigJSON) - if err != nil { - return nil, err - } - return c.(Config), err +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
+type ParseResult struct { + Config Config + Err error } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f3f593c84..71195c494 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -38,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. @@ -54,18 +59,36 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool - // Payload is the payload with original type. - Payload interface{} - // Data is the serialized message payload. - Data []byte - // Length is the length of uncompressed data. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. + Payload any + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -81,6 +104,10 @@ type InHeader struct { Client bool // WireLength is the wire length of header. WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. 
@@ -89,8 +116,6 @@ type InHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string } // IsClient indicates if the stats information is from client side. @@ -104,6 +129,9 @@ type InTrailer struct { Client bool // WireLength is the wire length of trailer. WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD } // IsClient indicates if the stats information is from client side. @@ -115,13 +143,19 @@ func (s *InTrailer) isRPCStats() {} type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool - // Payload is the payload with original type. - Payload interface{} - // Data is the serialized message payload. - Data []byte - // Length is the length of uncompressed data. + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. + Payload any + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time @@ -136,6 +170,10 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -144,8 +182,6 @@ type OutHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string } // IsClient indicates if this stats information is from client side. @@ -158,7 +194,13 @@ type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD } // IsClient indicates if this stats information is from client side. @@ -176,6 +218,7 @@ type End struct { EndTime time.Time // Trailer contains the trailer metadata received from the server. This // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. Trailer metadata.MD // Error is the error the RPC ended with. 
It is an error generated from // status.Status and can be converted back to status.Status using diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a1348e9b1..a93360efb 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -32,89 +32,25 @@ import ( "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. -func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -124,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -135,20 +71,57 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. 
func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -160,69 +133,30 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 134a624a1..bb2b2a216 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,41 +23,56 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" - "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) -// StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) -// StreamDesc represents a streaming RPC service's method specification. 
+// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. @@ -67,9 +82,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -78,7 +93,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -111,7 +128,10 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -120,7 +140,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. 
This is typically @@ -129,13 +149,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -156,6 +176,30 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -164,13 +208,55 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } - mc := cc.GetMethodConfig(method) + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -207,6 +293,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -230,33 +317,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -270,29 +330,41 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, + onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - cs.callInfo.stream = cs - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -306,7 +378,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } if desc != unaryStreamDesc { @@ -327,49 +401,124 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: newTrace("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = newTraceContext(ctx, trInfo.tr) } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. 
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - return toRPCErr(err) + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.ctx = s.Context() + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -387,8 +536,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -396,7 +544,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -419,19 +567,26 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? 
+ onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -444,12 +599,26 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -459,76 +628,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } - if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - cs.firstAttempt = false - return nil + return true, nil } - cs.firstAttempt = false if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. 
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { - return err + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } - if cs.numRetries+1 >= rp.maxAttempts { - return err + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err } var dur time.Duration @@ -536,12 +705,12 @@ func (cs *clientStream) shouldRetry(err error) error { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } @@ -551,25 +720,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } - if err := cs.newAttemptLocked(nil, nil); err != nil { + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in replayBuffer always sets cs.attempt + // if it is able to pick a transport and create a stream. 
+ if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -579,7 +755,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -587,7 +766,23 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.replayBuffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } } a := cs.attempt cs.mu.Unlock() @@ -605,7 +800,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -619,12 +814,21 @@ func (cs *clientStream) Header() (metadata.MD, error) { m, err = a.s.Header() return toRPCErr(err) }, cs.commitAttemptLocked) + + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -633,10 +837,13 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } - return m, err + + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -654,30 +861,30 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt - for _, f := range cs.buffer { - if err := f(a); err != nil { +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). 
if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -696,70 +903,83 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err - } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() + } + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } } - return + return err } -func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. 
cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, - }) + Message: recvInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if cs.binlog != nil { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - } } return err } @@ -778,11 +998,14 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } } // We never returned an error here for reasons. return nil @@ -799,17 +1022,44 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ - OnClientSide: true, - }) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -821,19 +1071,10 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. 
- if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - } cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -851,8 +1092,10 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -860,10 +1103,11 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -882,14 +1126,14 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -899,15 +1143,14 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -919,14 +1162,12 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -946,12 +1187,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -959,15 +1200,15 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -982,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1067,24 +1308,28 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin t: t, } - as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. 
go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1147,7 +1392,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1166,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1192,7 +1446,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1216,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m interface{}) (err error) { // Only initialize this state once per stream. as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1237,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m interface{}) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1273,8 +1524,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1306,7 +1559,10 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1315,7 +1571,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. @@ -1331,13 +1587,15 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
@@ -1357,17 +1615,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } return err } @@ -1376,10 +1646,13 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1387,7 +1660,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1408,38 +1681,66 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) } - ss.binlog.Log(&binarylog.ServerMessage{ - Message: data, - }) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) + } } return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1447,7 +1748,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1468,13 +1769,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } } return err } @@ -1483,20 +1788,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, - }) + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } } return nil } @@ -1507,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. // Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 000000000..0037fee0b --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,238 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. 
+type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). + Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. + CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. +type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. 
+ Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. + SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). + Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. + Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. 
+type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. +type GenericServerStream[Req any, Res any] struct { + ServerStream +} + +var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil) +var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) +var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil) + +// Send pushes one message into the stream of responses to be consumed by the +// client. The type of message which can be sent is determined by the Res +// type parameter of the serverStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Send(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// SendAndClose pushes the unary response to the client. The type of message +// which can be sent is determined by the Res type parameter of the +// clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error { + return x.ServerStream.SendMsg(m) +} + +// Recv reads one message from the stream of requests generated by the client. +// The type of the message returned is determined by the Req type parameter +// of the clientStreamServer receiver. +func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) { + m := new(Req) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 584360f68..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -17,11 +17,18 @@ */ // Package tap defines the function handles which are executed on the transport -// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -29,19 +36,23 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b9994..10f4f798f 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -26,8 +26,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/trace" ) // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. @@ -41,15 +39,34 @@ func methodFamily(m string) string { if i := strings.Index(m, "/"); i >= 0 { m = m[:i] // remove everything from second slash } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } return m } +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + // traceInfo contains tracing information for an RPC. type traceInfo struct { - tr trace.Trace + tr traceLog firstLine firstLine } @@ -100,8 +117,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
} @@ -114,7 +131,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 000000000..1da3a2308 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go new file mode 100644 index 000000000..88d6e8571 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -0,0 +1,39 @@ +//go:build !grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + + t "golang.org/x/net/trace" +) + +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 483ef8968..a96b6a6bf 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.24.0" +const Version = "1.67.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 2d79b1c69..000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash - -if [[ `uname -a` = *"Darwin"* ]]; then - echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" - exit 1 -fi - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | (! read) -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" - -if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - fi - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif ! which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Ensure all source files contain a copyright message. -(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go') - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') - -# - Ensure all ptypes proto packages are renamed when importing. -(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") - -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") | fail_on_output -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Check that our module is tidy. 
-if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Collection of static analysis checks -# TODO(dfawley): don't use deprecated functions in examples. -staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' -google.golang.org/grpc/balancer.go:SA1019 -google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019 -google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019 -google.golang.org/grpc/xds/internal/resolver/xds_resolver.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019 -google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 -google.golang.org/grpc/balancer_test.go:SA1019 -google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 -google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 -google.golang.org/grpc/clientconn.go:S1024 -google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 -google.golang.org/grpc/clientconn_test.go:SA1019 -google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019 -google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 -google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 -google.golang.org/grpc/stats/stats_test.go:SA1019 -google.golang.org/grpc/test/balancer_test.go:SA1019 -google.golang.org/grpc/test/channelz_test.go:SA1019 -google.golang.org/grpc/test/end2end_test.go:SA1019 -google.golang.org/grpc/test/healthcheck_test.go:SA1019 -' ./... -misspell -error . diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE new file mode 100644 index 000000000..49ea0f928 --- /dev/null +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/google.golang.org/protobuf/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 000000000..bb2966e3b --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,685 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. 
+ } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. 
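This loop is where the caller-visible knobs land: unknown fields either fail with an "unknown field" error or are skipped under DiscardUnknown, duplicate fields and doubly-set oneofs are rejected, and RecursionLimit bounds message nesting. A hedged usage sketch; parseConfig and the package name are invented for illustration, the protojson and structpb APIs are real.

```
// Illustrative sketch only, not vendored code.
package protojsonexample

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

// parseConfig decodes arbitrary JSON into a google.protobuf.Struct with an
// explicit cap on nesting depth; a RecursionLimit of zero falls back to the
// package default.
func parseConfig(data []byte) (*structpb.Struct, error) {
	var s structpb.Struct
	opts := protojson.UnmarshalOptions{RecursionLimit: 64}
	if err := opts.Unmarshal(data, &s); err != nil {
		return nil, fmt.Errorf("decoding JSON: %w", err)
	}
	return &s, nil
}
```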
+func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 000000000..ae71007c1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 000000000..29846df22 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,382 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given [proto.Message] in the JSON format using options in +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(b, o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return append(b, '{', '}'), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. 
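The synthetic "@type" descriptor and typeURLFieldRanger above are how an expanded google.protobuf.Any carries its type URL; for well-known payloads, marshalAny further below writes "@type" plus a "value" member instead. A sketch of the resulting JSON, assuming the invented function name printAnyAsJSON (the output shown is approximate, since protojson deliberately keeps its whitespace unstable):

```
// Illustrative sketch only, not vendored code.
package protojsonexample

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func printAnyAsJSON() error {
	a, err := anypb.New(durationpb.New(90 * time.Second))
	if err != nil {
		return err
	}
	b, err := protojson.Marshal(a)
	if err != nil {
		return err
	}
	// Prints roughly:
	//   {"@type":"type.googleapis.com/google.protobuf.Duration","value":"90s"}
	fmt.Println(string(b))
	return nil
}
```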
+type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. 
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 000000000..4b177c820 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,876 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
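As the dispatch below shows, the wrapper well-known types serialize as bare JSON primitives rather than objects, and the 64-bit-integers-as-strings rule from marshalSingular still applies to them. A small sketch; printWrapperJSON and the package name are invented, the protojson and wrapperspb calls are real.

```
// Illustrative sketch only, not vendored code.
package protojsonexample

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func printWrapperJSON() error {
	i32, err := protojson.Marshal(wrapperspb.Int32(42))
	if err != nil {
		return err
	}
	i64, err := protojson.Marshal(wrapperspb.Int64(42))
	if err != nil {
		return err
	}
	fmt.Println(string(i32)) // 42   (a JSON number)
	fmt.Println(string(i64)) // "42" (64-bit integers are written as JSON strings)
	return nil
}
```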
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. 
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
+ Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") + } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil + } + } +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. 
+ d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
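A short illustration of that rule using the real structpb constructors (only the function name printValueJSON and the package name are invented): each Value marshals as whichever JSON form its oneof holds, and a NumberValue holding NaN or an infinity is rejected, as marshalKnownValue below enforces.

```
// Illustrative sketch only, not vendored code.
package protojsonexample

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func printValueJSON() error {
	for _, v := range []*structpb.Value{
		structpb.NewNullValue(),
		structpb.NewBoolValue(true),
		structpb.NewNumberValue(2.5),
		structpb.NewStringValue("hi"),
	} {
		b, err := protojson.Marshal(v)
		if err != nil {
			return err
		}
		fmt.Println(string(b)) // null, true, 2.5, "hi" (one line per Value)
	}
	return nil
}
```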
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
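
As a quick illustration of the Duration encoding rules described above (decimal seconds, an `s` suffix, and 0, 3, 6, or 9 fractional digits), here is a small sketch that is not part of this patch. It assumes the helper package `google.golang.org/protobuf/types/known/durationpb` is available in addition to the vendored `protojson` package.

```
// Sketch: Duration's JSON form is a quoted decimal number of seconds
// with an "s" suffix; trailing zeros are trimmed in groups of three.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	durs := []time.Duration{
		3 * time.Second,                          // "3s"
		3*time.Second + 500*time.Millisecond,     // "3.500s"
		-time.Nanosecond,                         // "-0.000000001s"
	}
	for _, d := range durs {
		b, err := protojson.Marshal(durationpb.New(d))
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}
}
```
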
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 000000000..24bc98ac4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,772 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). 
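
For readers unfamiliar with this package, a minimal usage sketch (not part of the vendored code) of the `prototext.Unmarshal`/`prototext.Format` pair defined below. `durationpb` is used only as a convenient concrete message type and is an assumption of the example, not something this file requires.

```
// Sketch: parse a textproto literal into a message, then format it back.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration
	if err := prototext.Unmarshal([]byte(`seconds: 3 nanos: 500000000`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration())       // 3.5s
	fmt.Print(prototext.Format(&d))   // human-readable, output not guaranteed stable
}
```
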
+func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == genid.Any_message_fullname { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. 
+ tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. + case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = protoreflect.Name(tok.IdentName()) + fd = fieldDescs.ByTextName(string(name)) + + case text.TypeName: + // Handle extensions only. This code path is not for Any. + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := protoreflect.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. 
+ if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + if tok.Kind() != text.Scalar { + return protoreflect.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if b, ok := tok.Bool(); ok { + return protoreflect.ValueOfBool(b), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return protoreflect.ValueOfInt32(n), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return protoreflect.ValueOfInt64(n), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return protoreflect.ValueOfUint32(n), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return protoreflect.ValueOfUint64(n), nil + } + + case protoreflect.FloatKind: + if n, ok := tok.Float32(); ok { + return protoreflect.ValueOfFloat32(n), nil + } + + case protoreflect.DoubleKind: + if n, ok := tok.Float64(); ok { + return protoreflect.ValueOfFloat64(n), nil + } + + case protoreflect.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return protoreflect.ValueOfString(s), nil + } + + case protoreflect.BytesKind: + if b, ok := tok.String(); ok { + return protoreflect.ValueOfBytes([]byte(b)), nil + } + + case protoreflect.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. 
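
To make the "either `[]` syntax or a single value" rule concrete, a small sketch (not part of the vendored code) showing that both spellings of a repeated field parse to the same result. `fieldmaskpb.FieldMask` is used only because it has a repeated string field; that choice is an assumption of the example.

```
// Sketch: textproto accepts both the [] list syntax and repeated
// name/value pairs for a repeated field.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	inputs := []string{
		`paths: ["user.name", "user.id"]`,
		`paths: "user.name" paths: "user.id"`,
	}
	for _, in := range inputs {
		var fm fieldmaskpb.FieldMask
		if err := prototext.Unmarshal([]byte(in), &fm); err != nil {
			panic(err)
		}
		fmt.Println(fm.GetPaths()) // [user.name user.id] in both cases
	}
}
```
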
+func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return protoreflect.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. 
+ case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + switch name := protoreflect.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case genid.MapEntry_Value_field_name: + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool + var seenValue bool + var isExpanded bool + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. 
+ if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch name := protoreflect.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) + } + seenTypeUrl = true + + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) + } + bValue = []byte(s) + seenValue = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + + case text.TypeName: + if isExpanded { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if seenTypeUrl { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + isExpanded = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. + Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. 
+ switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.skipMessageValue(); err != nil { + return err + } + default: + // Skip items. This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 000000000..162b4f98a --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 000000000..1f57e6610 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,380 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in textproto format using default +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. 
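
A brief sketch (not part of the vendored code) of configuring the marshaler defined below. The `durationpb` message and the chosen options are illustrative assumptions; per the package docs, the exact output bytes should not be relied on as stable.

```
// Sketch: multiline textproto output via MarshalOptions.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	opts := prototext.MarshalOptions{Multiline: true, Indent: "  "}
	b, err := opts.Marshal(durationpb.New(90*time.Second + 500*time.Millisecond))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// Roughly:
	// seconds: 90
	// nanos: 500000000
}
```
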
+type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given [proto.Message] in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. 
+ if m == nil { + return b, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. + if messageDesc.FullName() == genid.Any_message_fullname { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false + } + return true + }) + if err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + e.WriteUint(val.Uint()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(string(val.Bytes())) + + case protoreflect.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. + e.WriteInt(int64(num)) + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. 
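
As a companion to the list-marshaling code below, a sketch (not part of the vendored code) of what "multiple name-value fields" looks like in the output; `fieldmaskpb.FieldMask` is again only an illustrative message choice.

```
// Sketch: a repeated field is emitted as one name-value entry per element.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	fm := &fieldmaskpb.FieldMask{Paths: []string{"user.name", "user.id"}}
	fmt.Print(prototext.Format(fm))
	// Roughly (output not guaranteed stable):
	// paths: "user.name"
	// paths: "user.id"
}
```
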
+func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName(string(genid.MapEntry_Key_field_name)) + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName(string(genid.MapEntry_Value_field_name)) + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalUnknown parses the given []byte and marshals fields out. +// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any protoreflect.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(genid.Any_Value_field_number) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. If marshaling fails, reset encoder output + // back to this position. + pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. + e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 000000000..e942bc983 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,547 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://protobuf.dev/programming-guides/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the [google.golang.org/protobuf/proto] package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 +) + +// IsValid reports whether the field number is semantically valid. +func (n Number) IsValid() bool { + return MinValidNumber <= n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup + errCodeRecursionDepth +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see [ParseError]). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). +// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. 
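
For orientation, a small sketch (not part of the vendored code) of the low-level wire API declared in this file: append a single length-delimited field record and consume it again. The field number and payload are arbitrary.

```
// Sketch: encode and decode one field record with protowire.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field number 1 as a length-prefixed string.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.BytesType)
	b = protowire.AppendString(b, "hello")

	// Decode the whole record: number, wire type, total length.
	num, typ, n := protowire.ConsumeField(b)
	if n < 0 {
		panic(protowire.ParseError(n))
	}
	fmt.Println(num, typ, n) // 1 2 7

	// Or step through it: tag first, then the value.
	_, _, tagLen := protowire.ConsumeTag(b)
	s, _ := protowire.ConsumeString(b[tagLen:])
	fmt.Println(s) // hello
}
```
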
+func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = consumeFieldValueD(num2, typ2, b, depth-1) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see [ParseError]). +func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. +func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. 
+// This returns a negative length upon an error (see [ParseError]). +func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. + // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see [ParseError]). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see [ParseError]). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. 
+// This returns a negative length upon an error (see [ParseError]). +func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see [ParseError]). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see [ParseError]). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. + // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. 
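Putting the pieces together, a complete field is a tag (the field number and wire type packed by EncodeTag) followed by its value; sint fields additionally zig-zag the payload so small negative numbers stay small on the wire. A sketch, with field number 1 and the value -2 chosen arbitrarily:

```
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Tag for field 1 with the varint wire type, then the zig-zag form of -2.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeZigZag(-2))
	fmt.Printf("% x\n", b) // 08 03

	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		panic(protowire.ParseError(n))
	}
	v, m := protowire.ConsumeVarint(b[n:])
	if m < 0 {
		panic(protowire.ParseError(m))
	}
	fmt.Println(num, typ == protowire.VarintType, protowire.DecodeZigZag(v)) // 1 true -2
}
```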
+// +// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 000000000..87e46bd4d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,414 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. +package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case protoreflect.Names: + name = "Names" + case protoreflect.FieldNumbers: + name = "FieldNumbers" + case protoreflect.FieldRanges: + name = "FieldRanges" + case protoreflect.EnumRanges: + name = "EnumRanges" + case protoreflect.FileImports: + name = "FileImports" + case protoreflect.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case protoreflect.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case protoreflect.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case protoreflect.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case protoreflect.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case protoreflect.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) 
+ ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +type methodAndName struct { + method reflect.Value + name string +} + +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) +} + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(protoreflect.FileDescriptor) + rs := records{ + allowMulti: allowMulti, + record: record, + } + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) + } else { + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) + } + } else { + switch { + case isFile: + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) + case isRoot: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
+ default: + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) + } + switch t := t.(type) { + case protoreflect.FieldDescriptor: + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case protoreflect.EnumKind: + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) + case protoreflect.MessageKind, protoreflect.GroupKind: + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case protoreflect.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) + + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, + }...) 
+ + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) + } + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) +} + +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { + for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } + var rv reflect.Value + if a.method.IsValid() { + rv = a.method.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a.name) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) + } + if _, ok := rv.Interface().(protoreflect.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a.name, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. 
+ var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 000000000..8401be8c8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 000000000..49c8676d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. 
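The descfmt stringer above is what backs the %v and %+v output of protoreflect descriptors, and detrand is the source of the deliberately unstable spacing noted in formatColon. A small sketch of the call path from user code (durationpb is an arbitrary well-known message chosen purely for illustration):

```
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	// Single-line form; "%+v" prints the aligned multi-line form.
	// The exact spacing varies between builds by design.
	fmt.Printf("%v\n", md)
}
```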
+func Bool() bool { + return randSeed%2 == 1 +} + +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 000000000..14656b65a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 0000000000000000000000000000000000000000..ff6a38360add36f53d48bb0863b701696e0d7b2d GIT binary patch literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H literal 0 HcmV?d00001 diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 000000000..029a6a12d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 000000000..328dc733b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defval marshals and unmarshals textual forms of default values. 
+// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + switch k { + case protoreflect.BoolKind: + if f == GoTag { + switch s { + case "1": + return protoreflect.ValueOfBool(true), nil, nil + case "0": + return protoreflect.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return protoreflect.ValueOfBool(true), nil, nil + case "false": + return protoreflect.ValueOfBool(false), nil, nil + } + } + case protoreflect.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(protoreflect.Name(s)) + if ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil + } + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return protoreflect.ValueOfInt32(int32(v)), nil, nil + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return protoreflect.ValueOfInt64(int64(v)), nil, nil + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(v)), nil, nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(v)), nil, nil + } + case protoreflect.FloatKind, protoreflect.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil + } else { + return protoreflect.ValueOfFloat64(float64(v)), nil, nil + } + } + case protoreflect.StringKind: + // String values are already unescaped and can be used as is. 
+ return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return protoreflect.ValueOfBytes(b), nil, nil + } + } + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { + switch k { + case protoreflect.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case protoreflect.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case protoreflect.FloatKind, protoreflect.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == protoreflect.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case protoreflect.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case protoreflect.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 000000000..ea1d3e65a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. 
+ if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. + d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. 
+func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. +func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 000000000..2999d7133 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. + if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. 
It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. + index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 000000000..f7fea7d8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 000000000..50578d659 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. 
+ comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. +func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. 
+func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 000000000..934f2dcb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) 
+ for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. +func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. 
+func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 000000000..a6693f0a2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,242 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// The MessageSet wire format is equivalent to a message defined as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. +// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. 
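The Item group layout described in the package comment can be hand-assembled with the public protowire package. This sketch assumes a made-up type_id (1234) and payload; it mirrors what AppendFieldStart and AppendFieldEnd later in this file produce.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const typeID = 1234                             // hypothetical extension field number
	payload := []byte("serialized extension bytes") // hypothetical message contents

	var b []byte
	b = protowire.AppendTag(b, 1, protowire.StartGroupType) // Item group, field 1
	b = protowire.AppendTag(b, 2, protowire.VarintType)     // type_id, field 2
	b = protowire.AppendVarint(b, typeID)
	b = protowire.AppendTag(b, 3, protowire.BytesType) // message, field 3
	b = protowire.AppendBytes(b, payload)
	b = protowire.AppendTag(b, 1, protowire.EndGroupType)

	fmt.Printf("% x\n", b)
}
```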
+func IsMessageSet(md protoreflect.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field properly extends a MessageSet. +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { + switch { + case fd.Name() != ExtensionName: + return false + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): + return false + } + return true +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. +func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. + message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. 
+ // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. +func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 000000000..7e87c7604 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. 
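For reference, the tag strings this package parses look like those protoc-gen-go historically emitted on generated struct fields. The struct below is a hand-written stand-in (field names and numbers are invented), and the snippet simply prints its `protobuf` tags via the standard reflect package.

```go
package main

import (
	"fmt"
	"reflect"
)

// example imitates the shape of a legacy generated struct; it is not real
// generated code.
type example struct {
	Name  string `protobuf:"bytes,1,opt,name=name,proto3"`
	Count int32  `protobuf:"varint,2,opt,name=count,proto3"`
}

func main() {
	t := reflect.TypeOf(example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s: %q\n", f.Name, f.Tag.Get("protobuf"))
	}
}
```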
+package tag + +import ( + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = protoreflect.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = protoreflect.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = protoreflect.Optional + case s == "req": + f.L1.Cardinality = protoreflect.Required + case s == "rep": + f.L1.Cardinality = protoreflect.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = protoreflect.BoolKind + case reflect.Int32: + f.L1.Kind = protoreflect.Int32Kind + case reflect.Int64: + f.L1.Kind = protoreflect.Int64Kind + case reflect.Uint32: + f.L1.Kind = protoreflect.Uint32Kind + case reflect.Uint64: + f.L1.Kind = protoreflect.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = protoreflect.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = protoreflect.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = protoreflect.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = protoreflect.Fixed32Kind + case reflect.Float32: + f.L1.Kind = protoreflect.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = protoreflect.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = protoreflect.Fixed64Kind + case reflect.Float64: + f.L1.Kind = protoreflect.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = protoreflect.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = protoreflect.BytesKind + default: + f.L1.Kind = protoreflect.MessageKind + } + case s == "group": + f.L1.Kind = protoreflect.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = protoreflect.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.StringName.InitJSON(jsonName) + } + case s == "packed": + f.L1.EditionFeatures.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything 
afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = filedesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = filedesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: + tag = append(tag, "varint") + case protoreflect.Sint32Kind: + tag = append(tag, "zigzag32") + case protoreflect.Sint64Kind: + tag = append(tag, "zigzag64") + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: + tag = append(tag, "fixed32") + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: + tag = append(tag, "fixed64") + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: + tag = append(tag, "bytes") + case protoreflect.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case protoreflect.Optional: + tag = append(tag, "opt") + case protoreflect.Required: + tag = append(tag, "req") + case protoreflect.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == protoreflect.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. 
+ if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 000000000..099b2bf45 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,686 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. + openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. 
+func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", str) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. + s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. 
+// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. +func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) 
+ } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 000000000..45c81f029 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,211 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: num.string(d.in), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. +type number struct { + kind uint8 + neg bool + size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) + +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + var sep int + if s[0] == '-' { + neg = true + s = s[1:] + size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep + if len(s) == 0 { + return number{} + } + } + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. 
+ kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size, sep: sep} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size, sep: sep} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 000000000..d4d349023 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. 
+ return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. + n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF")) + if n > 2 { + n = 2 + } + v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8) + if err != nil { + return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n]) + } + in, out = in[2+n:], append(out, byte(v)) + case 'u', 'U': + // Four or eight hexadecimal characters + n := 6 + if r == 'U' { + n = 10 + } + if len(in) < n { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:n]), 16, 32) + if utf8.MaxRune < v || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n]) + } + in = in[n:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", d.newSyntaxError("invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", ErrUnexpectedEOF +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } + +// UnmarshalString returns an unescaped string given a textproto string value. +// String value needs to contain single or double quotes. This is only used by +// internal/encoding/defval package for unmarshaling bytes. 
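The concatenation behavior documented for parseStringValue is observable through the public prototext package, which wraps this decoder. A minimal sketch using the well-known StringValue wrapper type:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Adjacent string literals, in either quote style, form a single value.
	in := `value: "foo" 'bar' "baz"`
	var m wrapperspb.StringValue
	if err := prototext.Unmarshal([]byte(in), &m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetValue()) // foobarbaz
}
```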
+func UnmarshalString(s string) (string, error) {
+	d := NewDecoder([]byte(s))
+	return d.parseString()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 000000000..83d2b0d5a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+	Invalid Kind = iota
+	EOF
+	Name   // Name indicates the field name.
+	Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+	MessageOpen
+	MessageClose
+	ListOpen
+	ListClose
+
+	// comma and semi-colon are only for parsing in between values and should not be exposed.
+	comma
+	semicolon
+
+	// bof indicates beginning of file, which is the default token
+	// kind at the beginning of parsing.
+	bof = Invalid
+)
+
+func (t Kind) String() string {
+	switch t {
+	case Invalid:
+		return "<invalid>"
+	case EOF:
+		return "eof"
+	case Scalar:
+		return "scalar"
+	case Name:
+		return "name"
+	case MessageOpen:
+		return "{"
+	case MessageClose:
+		return "}"
+	case ListOpen:
+		return "["
+	case ListClose:
+		return "]"
+	case comma:
+		return ","
+	case semicolon:
+		return ";"
+	default:
+		return fmt.Sprintf("<invalid:%v>", uint8(t))
+	}
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+	IdentName NameKind = iota + 1
+	TypeName
+	FieldNumber
+)
+
+func (t NameKind) String() string {
+	switch t {
+	case IdentName:
+		return "IdentName"
+	case TypeName:
+		return "TypeName"
+	case FieldNumber:
+		return "FieldNumber"
+	default:
+		return fmt.Sprintf("<invalid:%v>", uint8(t))
+	}
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+	numberValue = iota + 1
+	stringValue
+	literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is a negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+	// Kind of the Token object.
+	kind Kind
+	// attrs contains metadata for the following Kinds:
+	// Name: hasSeparator bit and one of NameKind.
+	// Scalar: one of numberValue, stringValue, literalValue.
+	attrs uint8
+	// numAttrs contains metadata for numberValue:
+	// - highest bit is whether negative or positive.
+	// - lower bits indicate one of numDec, numHex, numOct, numFloat.
+	numAttrs uint8
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+ raw []byte + // str contains parsed string for the following: + // - stringValue of Scalar kind + // - numberValue of Scalar kind + // - TypeName of Name kind + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// NameKind returns IdentName, TypeName or FieldNumber. +// It panics if type is not Name. +func (t Token) NameKind() NameKind { + if t.kind == Name { + return NameKind(t.attrs &^ hasSeparator) + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// HasSeparator returns true if the field name is followed by the separator char +// ':', else false. It panics if type is not Name. +func (t Token) HasSeparator() bool { + if t.kind == Name { + return t.attrs&hasSeparator != 0 + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// IdentName returns the value for IdentName type. +func (t Token) IdentName() string { + if t.kind == Name && t.attrs&uint8(IdentName) != 0 { + return string(t.raw) + } + panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// TypeName returns the value for TypeName type. +func (t Token) TypeName() string { + if t.kind == Name && t.attrs&uint8(TypeName) != 0 { + return t.str + } + panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// FieldNumber returns the value for FieldNumber type. It returns a +// non-negative int32 value. Caller will still need to validate for the correct +// field number range. +func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. 
+func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. +func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. 
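Tying the Decoder and the Token accessors together, a token-dump loop looks roughly like the sketch below. The input literal is invented, and the package is internal, so in practice it is reached through prototext rather than imported directly.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/internal/encoding/text" // internal: not importable outside the protobuf module
)

func main() {
	d := text.NewDecoder([]byte(`name: "hello" nums: [1, 2, 3]`))
	for {
		tok, err := d.Read()
		if err != nil {
			panic(err)
		}
		if tok.Kind() == text.EOF {
			break
		}
		fmt.Printf("%v %q\n", tok.Kind(), tok.RawString())
	}
}
```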
+func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 000000000..7ae6c2a3c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. +// +// The text format is almost a superset of JSON except: +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 000000000..cf7aed77b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,272 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. 
This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. +func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. 
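Before WriteFloat and the remaining scalar writers below, here is a hedged sketch of driving the Encoder above end to end (NewEncoder, WriteName, the scalar writers, Bytes). The package lives under internal/, so this would only compile from inside the google.golang.org/protobuf module itself; the test package and function names are hypothetical:

```go
package text_test // hypothetical in-module test file

import (
	"fmt"
	"testing"

	"google.golang.org/protobuf/internal/encoding/text"
)

func TestEncoderSketch(t *testing.T) {
	// Two-space indent, curly-brace delimiters, no ASCII-only escaping.
	e, err := text.NewEncoder(nil, "  ", [2]byte{'{', '}'}, false)
	if err != nil {
		t.Fatal(err)
	}
	e.WriteName("name")
	e.WriteString("Ada")
	e.WriteName("id")
	e.WriteUint(7)
	// Roughly: name: "Ada"\nid: 7 -- the exact whitespace is deliberately
	// unstable (see the detrand calls in prepareNext).
	fmt.Printf("%s\n", e.Bytes())
}
```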
+func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. + switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 000000000..c2d6bd526 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,104 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. 
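Before New and the prefix variable that follows, it is worth spelling out the intent: the "proto: " prefix deliberately alternates between a regular and a non-breaking space so that callers match errors against the exported Error sentinel with Is rather than by string comparison. A hedged in-module sketch of that usage (the package name, import alias, function name, and message name are all hypothetical):

```go
package example // hypothetical; the errors package is internal to google.golang.org/protobuf

import (
	"fmt"

	perrors "google.golang.org/protobuf/internal/errors"
)

func sentinelExample() {
	err := perrors.New("field %v contains invalid UTF-8", "example.Msg.name")
	wrapped := perrors.Wrap(err, "while decoding %q", "payload")

	// Both match the sentinel through Is/Unwrap; string comparison would be
	// defeated by the intentionally unstable "proto: " prefix.
	fmt.Println(perrors.Is(err, perrors.Error), perrors.Is(wrapped, perrors.Error)) // true true
}
```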
+func New(f string, x ...any) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...any) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...any) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) +} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 000000000..fbcd34920 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.13 +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 000000000..5e72f1cde --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.13 +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 000000000..7cac1c190 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,157 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. + NumExtensions int32 + // NumServices is the total number of services declared in the file. + NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + protoregistry.ExtensionTypeResolver + } + + // FileRegistry is used to look up file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File protoreflect.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. + Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Services is all service descriptors in "flattened ordering". 
+ Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = protoregistry.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = protoregistry.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. +func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case genid.FileDescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.FileDescriptorProto_Extension_field_number: + db.NumExtensions++ + case genid.FileDescriptorProto_Service_field_number: + db.NumServices++ + } + } else { + switch num { + case genid.DescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.DescriptorProto_NestedType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.DescriptorProto_Extension_field_number: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 000000000..df53ff40b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,733 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. 
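That alignment can be shown with a direct conversion from the public descriptorpb enum against the constants that follow. A hedged in-module fragment (filedesc itself is internal; the package and function names here are hypothetical):

```go
package example // hypothetical in-module snippet

import (
	"fmt"

	"google.golang.org/protobuf/internal/filedesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func editionExample() {
	// Both sides carry the numeric value 1000, so a plain conversion is enough.
	e := filedesc.Edition(descriptorpb.Edition_EDITION_2023)
	fmt.Println(e == filedesc.Edition2023) // true
}
```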
+const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. +// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. + +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions + Path string + Package protoreflect.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + + EditionFeatures EditionFeatures + } + FileL2 struct { + Options func() protoreflect.ProtoMessage + Imports FileImports + Locations SourceLocations + } + + EditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsLegacyRequired is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf8_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. 
+ GenerateLegacyUnmarshalJSON bool + } +) + +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() protoreflect.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
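Before GoPackagePath below, one note on lazyInit above: it uses an atomic flag plus a mutex rather than sync.Once, presumably so descriptors built as literals (with L2 already populated) can also take the fast path once the flag is set. A generic standalone sketch of the same double-checked pattern, with hypothetical payload/lazy types standing in for FileL2/File:

```go
package example

import (
	"sync"
	"sync/atomic"
)

type payload struct{ data []byte } // hypothetical stand-in for FileL2

type lazy struct {
	once uint32     // atomically set once val is valid
	mu   sync.Mutex // serializes the slow path
	val  *payload
}

func (l *lazy) get(build func() *payload) *payload {
	if atomic.LoadUint32(&l.once) == 0 {
		l.mu.Lock()
		if l.val == nil {
			l.val = build() // runs at most once
		}
		atomic.StoreUint32(&l.once, 1)
		l.mu.Unlock()
	}
	return l.val
}
```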
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures + } + EnumL2 struct { + Options func() protoreflect.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber + } +) + +func (ed *Enum) Options() protoreflect.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} + +func (ed *EnumValue) Options() protoreflect.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures + } + MessageL2 struct { + Options func() protoreflect.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor + + EditionFeatures EditionFeatures + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() protoreflect.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures + } +) + +func (md *Message) Options() protoreflect.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) 
IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() protoreflect.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) HasPresence() bool { + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.L1.EditionFeatures.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() protoreflect.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) +} +func (fd *Field) MapValue() protoreflect.FieldDescriptor { 
+ if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) +} +func (fd *Field) Enum() protoreflect.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() protoreflect.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(protoreflect.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + return fd.L1.EditionFeatures.IsUTF8Validated +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() protoreflect.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures + } + ExtensionL2 struct { + Options func() protoreflect.ProtoMessage + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + Default defaultValue + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor + } +) + +func (xd *Extension) Options() protoreflect.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return 
xd.L1.Cardinality != protoreflect.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() protoreflect.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() protoreflect.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() protoreflect.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} 
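The methods above are the internal implementation behind the public protoreflect descriptor API. The following small program exercises the map-related accessors through a well-known type; it only touches public packages, so unlike the internal code it is fully runnable as-is:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Struct.fields is declared as map<string, Value>, so the descriptor
	// reports it as a map field with a string key and a message value.
	md := (&structpb.Struct{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("fields")
	fmt.Println(fd.IsMap(), fd.MapKey().Kind(), fd.MapValue().Kind()) // true string message
}
```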
+func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. +var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor + Index int + } +) + +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string +} + +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name +} + +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. + if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. + var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. 
+ s.nameText = string(fd.Name()) + if isGroupLike(fd) { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s +} + +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } + +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val protoreflect.Value + enum protoreflect.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { + // Return the zero value as the default if unpopulated. + if !dv.has { + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} + } + switch fd.Kind() { + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + return protoreflect.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. 
+ panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 000000000..8a57d60b0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,558 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. +// +// The alloc methods "allocates" slices by pulling from the capacity. +func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. 
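The initDecls/alloc pattern above pre-sizes each backing slice so that later alloc calls can hand out sub-slices without ever reallocating, which keeps previously returned element pointers valid; checkDecls, defined next, then asserts that every reserved slot was consumed (len == cap). A standalone sketch of the same technique with a hypothetical Item type:

```go
package example

type Item struct{ name string } // hypothetical element type

type pool struct{ all []Item }

// newPool reserves capacity for every Item up front.
func newPool(total int) *pool { return &pool{all: make([]Item, 0, total)} }

// alloc extends the length within the fixed capacity and returns the new
// window; because the backing array never moves, &p.all[i] stays valid.
func (p *pool) alloc(n int) []Item {
	start := len(p.all)
	p.all = p.all[:start+n]
	return p.all[start : start+n]
}

// checkDone mirrors checkDecls: every reserved slot must have been allocated.
func (p *pool) checkDone() {
	if len(p.all) != cap(p.all) {
		panic("mismatching cardinality")
	}
}
```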
+func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField protoreflect.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + var options []byte + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": + fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 + case "proto3": + fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // If syntax is missing, it is assumed to be proto2. + if fd.L1.Syntax == 0 { + fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 + } + + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
+ if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Name_field_number: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.EnumDescriptorProto_Value_field_number: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) + + var prevField protoreflect.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + xd.L1.Number = protoreflect.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + xd.L1.Cardinality = protoreflect.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + xd.L1.Kind = protoreflect.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_Extendee_field_number: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = 
unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Name_field_number: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() any { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { + if len(b) == 0 || b[0] != '.' { + panic("name reference must be fully qualified") + } + return protoreflect.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 000000000..e56c91a8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,701 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case protoreflect.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case protoreflect.MessageKind, protoreflect.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } + } + + // Default is resolved here since it depends on Enum being resolved. 
+ if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case protoreflect.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case protoreflect.MessageKind, protoreflect.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. + for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(protoreflect.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(protoreflect.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_PublicDependency_field_number: + fd.L2.Imports[v].IsPublic = true + case genid.FileDescriptorProto_WeakDependency_field_number: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Dependency_field_number: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.FileDescriptorProto_MessageType_field_number: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case 
genid.FileDescriptorProto_Extension_field_number: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.FileDescriptorProto_Service_field_number: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case genid.FileDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: + r[0] = protoreflect.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: + r[1] = protoreflect.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: + vd.L1.Number = protoreflect.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Name_field_number: + // NOTE: Enum values are in the same scope as the enum parent. 
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case genid.EnumValueDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Field_field_number: + rawFields = append(rawFields, v) + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case genid.DescriptorProto_EnumType_field_number: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.DescriptorProto_NestedType_field_number: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.DescriptorProto_Extension_field_number: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == protoreflect.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case 
genid.DescriptorProto_ReservedRange_Start_field_number: + r[0] = protoreflect.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: + r[1] = protoreflect.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: + r[0] = protoreflect.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: + r[1] = protoreflect.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + fd.L1.Number = protoreflect.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + fd.L1.Cardinality = protoreflect.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + fd.L1.Kind = protoreflect.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case genid.FieldDescriptorProto_Proto3Optional_field_number: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case protoreflect.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case protoreflect.MessageKind, protoreflect.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Weak_field_number: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.OneofDescriptorProto_Name_field_number: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.OneofDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + 
b = b[m:] + switch num { + case genid.FieldDescriptorProto_Proto3Optional_field_number: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case protoreflect.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case protoreflect.MessageKind, protoreflect.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Method_field_number: + rawMethods = append(rawMethods, v) + case genid.ServiceDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_ClientStreaming_field_number: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case genid.MethodDescriptorProto_ServerStreaming_field_number: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.MethodDescriptorProto_InputType_field_number: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_OutputType_field_number: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. 
+func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) +} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { + if b == nil { + return nil + } + var opts protoreflect.ProtoMessage + var once sync.Once + return func() protoreflect.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 000000000..e3b6587da --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,457 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "math" + "sort" + "sync" + + "google.golang.org/protobuf/internal/genid" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []protoreflect.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []protoreflect.Name + once sync.Once + has map[protoreflect.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[protoreflect.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]protoreflect.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]protoreflect.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []protoreflect.FieldNumber + once sync.Once + has map[protoreflect.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []protoreflect.FieldDescriptor + once sync.Once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r 
rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + List []protoreflect.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File protoreflect.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return protoreflect.SourceLocation{} +} +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return protoreflect.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case protoreflect.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case protoreflect.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return protoreflect.SourceLocation{} + } + } else { + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return protoreflect.SourceLocation{} + } + } + case protoreflect.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case protoreflect.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return protoreflect.SourceLocation{} + } + default: + return protoreflect.SourceLocation{} + } + } +} +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. 
+ p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p protoreflect.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 000000000..f4107c05f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,367 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := 
p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p *Messages) Len() int { + return len(p.List) +} +func (p *Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p 
*Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 000000000..11f5f356b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + } 
+ } + } + defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) + } + return defaultsCache[match] +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 000000000..bfb3b8417 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,110 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum protoreflect.FullName + +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. 
+type PlaceholderEnumValue protoreflect.FullName + +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. +type PlaceholderMessage protoreflect.FullName + +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 000000000..ba83fea44 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,296 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. 
+package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// # Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. +// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File filedesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []any + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. 
+ // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. + TypeRegistry interface { + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error + } +} + +// Out is the output of the builder. +type Out struct { + File protoreflect.FileDescriptor +} + +func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { + tb.File.FileRegistry = protoregistry.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, + depIdxs: tb.DependencyIndexes, + fileRegistry: tb.File.FileRegistry, + } + + // Initialize registry if unpopulated. + if tb.TypeRegistry == nil { + tb.TypeRegistry = protoregistry.GlobalTypes + } + + fbOut := tb.File.Build() + out.File = fbOut.File + + // Process enums. + enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] + if len(tb.EnumInfos) != len(fbOut.Enums) { + panic("mismatching enum lengths") + } + if len(fbOut.Enums) > 0 { + for i := range fbOut.Enums { + tb.EnumInfos[i] = pimpl.EnumInfo{ + GoReflectType: reflect.TypeOf(enumGoTypes[i]), + Desc: &fbOut.Enums[i], + } + // Register enum types. + if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { + panic(err) + } + } + } + + // Process messages. + messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] + if len(tb.MessageInfos) != len(fbOut.Messages) { + panic("mismatching message lengths") + } + if len(fbOut.Messages) > 0 { + for i := range fbOut.Messages { + if messageGoTypes[i] == nil { + continue // skip map entry + } + + tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) + tb.MessageInfos[i].Desc = &fbOut.Messages[i] + + // Register message types. + if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { + panic(err) + } + } + + // As a special-case for descriptor.proto, + // locally register concrete message type for the options. 
+ if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) + case "EnumOptions": + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) + case "EnumValueOptions": + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) + case "MessageOptions": + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) + case "FieldOptions": + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) + case "OneofOptions": + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) + case "ExtensionRangeOptions": + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) + case "ServiceOptions": + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) + case "MethodOptions": + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) + } + } + } + } + + // Process extensions. + if len(tb.ExtensionInfos) != len(fbOut.Extensions) { + panic("mismatching extension lengths") + } + var depIdx int32 + for i := range fbOut.Extensions { + // For enum and message kinds, determine the referent Go type so + // that we can construct their constructors. + const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { + case protoreflect.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + case protoreflect.MessageKind, protoreflect.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + default: + goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] + } + if fbOut.Extensions[i].IsList() { + goType = reflect.SliceOf(goType) + } + + pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) + + // Register extension types. + if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { + panic(err) + } + } + + return out +} + +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), +} + +type depIdxs []int32 + +// Get retrieves the jth element of the ith sub-list. 
+func (x depIdxs) Get(i, j int32) int32 {
+ return x[x[int32(len(x))-i-1]+j]
+}
+
+type (
+ resolverByIndex struct {
+ goTypes []any
+ depIdxs depIdxs
+ fileRegistry
+ }
+ fileRegistry interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
+ RegisterFile(protoreflect.FileDescriptor) error
+ }
+)
+
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &es[depIdx]
+ } else {
+ return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
+ }
+}
+
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &ms[depIdx-len(es)]
+ } else {
+ return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
new file mode 100644
index 000000000..58372dd34
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags provides a set of flags controlled by build tags.
+package flags
+
+// ProtoLegacy specifies whether to enable support for legacy functionality
+// such as MessageSets, weak fields, and various other obscure behavior
+// that is necessary to maintain backwards compatibility with proto1 or
+// the pre-release variants of proto2 and proto3.
+//
+// This is disabled by default unless built with the "protolegacy" tag.
+//
+// WARNING: The compatibility agreement covers nothing provided by this flag.
+// As such, functionality may suddenly be removed or changed at our discretion.
+const ProtoLegacy = protoLegacy
+
+// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions.
+//
+// Lazy extension unmarshaling validates the contents of message-valued
+// extension fields at unmarshal time, but defers creating the message
+// structure until the extension is first accessed.
+const LazyUnmarshalExtensions = ProtoLegacy
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
new file mode 100644
index 000000000..bda8e8cf3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !protolegacy
+// +build !protolegacy
+
+package flags
+
+const protoLegacy = false
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
new file mode 100644
index 000000000..6d8d9bd6b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
@@ -0,0 +1,10 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build protolegacy +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. 
+const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. +const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..f30ab6b58 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,1270 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" +) + +// Field numbers for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 +) + +// Names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + +// Names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. 
+const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. 
+const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. 
+const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const (
+ FileOptions_JavaPackage_field_name protoreflect.Name = "java_package"
+ FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8"
+ FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for"
+ FileOptions_GoPackage_field_name protoreflect.Name = "go_package"
+ FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services"
+ FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services"
+ FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services"
+ FileOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix"
+ FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace"
+ FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix"
+ FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix"
+ FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
+ FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
+ FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
+ FileOptions_Features_field_name protoreflect.Name = "features"
+ FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+ FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
+ FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname"
+ FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files"
+ FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash"
+ FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8"
+ FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for"
+ FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package"
+ FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services"
+ FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services"
+ FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services"
+ FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated"
+ FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas"
+ FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix"
+ FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace"
+ FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+ FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix"
+ FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
+ FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
+ FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+ FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features"
+ FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1
+ FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8
+ FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10
+ FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20
+ FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27
+ FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9
+ FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11
+ FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16
+ FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17
+ FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18
+ FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23
+ FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31
+ FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36
+ FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37
+ FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39
+ FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40
+ FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
+ FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
+ FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
+ FileOptions_Features_field_number protoreflect.FieldNumber = 50
+ FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FileOptions.OptimizeMode.
+const (
+ FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode"
+ FileOptions_OptimizeMode_enum_name = "OptimizeMode"
+)
+
+// Enum values for google.protobuf.FileOptions.OptimizeMode.
+const (
+ FileOptions_SPEED_enum_value = 1
+ FileOptions_CODE_SIZE_enum_value = 2
+ FileOptions_LITE_RUNTIME_enum_value = 3
+)
+
+// Names for google.protobuf.MessageOptions.
+const (
+ MessageOptions_message_name protoreflect.Name = "MessageOptions"
+ MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions"
+)
+
+// Field names for google.protobuf.MessageOptions.
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. 
+const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_Features_field_name protoreflect.Name = "features" + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. 
+const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. 
+const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
+const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" +) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. +const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 000000000..9a652a2b4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. 
+const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..ad6f80c46 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. 
+const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. 
+const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..49bc73e25 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,228 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Enum values for google.protobuf.Field.Kind. +const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + +// Names for google.protobuf.Enum. 
+const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" +) + +// Field numbers for google.protobuf.Enum. +const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. +const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. 
+const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 000000000..5d5771c2e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. 
+func (Export) NewError(f string, x ...any) error { + return errors.New(f, x...) +} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = any + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. +func (Export) EnumOf(e enum) protoreflect.Enum { + switch e := e.(type) { + case nil: + return nil + case protoreflect.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case protoreflect.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { + switch e := e.(type) { + case nil: + return nil + case protoreflect.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = any + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case protoiface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case protoreflect.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case protoreflect.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case protoiface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) protoreflect.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. 
+func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. +func (Export) MessageTypeOf(m message) protoreflect.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 000000000..f29e6a8fa --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. 
+func needsInitCheck(md protoreflect.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. + // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. + has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 000000000..4bb0a7a20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,237 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) +} + +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. 
+ switch xd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == protoreflect.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value protoreflect.Value + b []byte + fn func() protoreflect.Value +} + +type ExtensionField struct { + typ protoreflect.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. + value protoreflect.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. 
+func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() protoreflect.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() protoreflect.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 000000000..78ee47e44 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,860 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. 
+ // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. + cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType protoreflect.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { 
+ fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize +} + +func appendMessage(b []byte, m 
proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v protoreflect.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp 
protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var 
err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(mopts.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := mopts.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(mopts.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := 
list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := mopts.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return protoreflect.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv protoreflect.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + mopts.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return protoreflect.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return protoreflect.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: 
sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + mopts.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 000000000..f55dc01e3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5724 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. +func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. 
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. +func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. 
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. +func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. 
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. +func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. 
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. +func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. 
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. +func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. 
+// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. +func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. 
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. +func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. 
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. +func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. 
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. 
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. +func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. 
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. +func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. 
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. +func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. 
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. +func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. 
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. +func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. 
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. +func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. 
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. +func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. 
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. 
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. +func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. 
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. 
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. 
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. 
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. 
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. 
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. 
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. 
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. 
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. 
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. 
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. 
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. 
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. 
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. 
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. 
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. +func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. 
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.StringSlice() + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + if !utf8.Valid(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. 
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. 
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.BytesSlice() + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. 
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 000000000..fb35f0bae --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,399 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero protoreflect.Value + keyKind protoreflect.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. 
+ keyField := fd.MapKey() + valField := fd.MapValue() + keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) + valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) + keyFuncs := encoderFuncsForValue(keyField) + valFuncs := encoderFuncsForValue(valField) + conv := newMapConverter(ft, fd) + + mapi := &mapInfo{ + goType: ft, + keyWiretag: keyWiretag, + valWiretag: valWiretag, + keyFuncs: keyFuncs, + valFuncs: valFuncs, + keyZero: keyField.Default(), + keyKind: keyField.Kind(), + conv: conv, + } + if valField.Kind() == protoreflect.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft) + if mp.Elem().IsNil() { + mp.Elem().Set(reflect.MakeMap(mapi.goType)) + } + if f.mi == nil { + return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) + } else { + return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) + } + }, + } + switch valField.Kind() { + case protoreflect.MessageKind: + funcs.merge = mergeMapOfMessage + case protoreflect.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap + } + if valFuncs.isInit != nil { + funcs.isInit = func(p pointer, f *coderFieldInfo) error { + return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) + } + } + return valueMessage, funcs +} + +const ( + mapKeyTagSize = 1 // field 1, tag size 1. + mapValTagSize = 1 // field 2, tag size 2. 
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: + var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v protoreflect.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + before := len(b) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := 
pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 000000000..4b15493f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.12 +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 000000000..0b31b66ea --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.12 +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 000000000..6b2fdbb73 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,217 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods protoiface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + unknownPtrKind bool + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= protoiface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. 
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. +func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 000000000..7a16ec13d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 000000000..145c577bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build purego || appengine +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n 
< 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 000000000..13077751e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case protoreflect.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case protoreflect.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == protoreflect.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == protoreflect.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case !fd.HasPresence() && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { 
+ return nil, coderDoublePtr + } + case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case protoreflect.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case protoreflect.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case protoreflect.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case protoreflect.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case protoreflect.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case protoreflect.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case protoreflect.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case protoreflect.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case protoreflect.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case protoreflect.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case protoreflect.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case protoreflect.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case protoreflect.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case protoreflect.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case protoreflect.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case protoreflect.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. 
+func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case protoreflect.BoolKind: + return coderBoolSliceValue + case protoreflect.EnumKind: + return coderEnumSliceValue + case protoreflect.Int32Kind: + return coderInt32SliceValue + case protoreflect.Sint32Kind: + return coderSint32SliceValue + case protoreflect.Uint32Kind: + return coderUint32SliceValue + case protoreflect.Int64Kind: + return coderInt64SliceValue + case protoreflect.Sint64Kind: + return coderSint64SliceValue + case protoreflect.Uint64Kind: + return coderUint64SliceValue + case protoreflect.Sfixed32Kind: + return coderSfixed32SliceValue + case protoreflect.Fixed32Kind: + return coderFixed32SliceValue + case protoreflect.FloatKind: + return coderFloatSliceValue + case protoreflect.Sfixed64Kind: + return coderSfixed64SliceValue + case protoreflect.Fixed64Kind: + return coderFixed64SliceValue + case protoreflect.DoubleKind: + return coderDoubleSliceValue + case protoreflect.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. + return coderStringSliceValue + case protoreflect.BytesKind: + return coderBytesSliceValue + case protoreflect.MessageKind: + return coderMessageSliceValue + case protoreflect.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): + switch fd.Kind() { + case protoreflect.BoolKind: + return coderBoolPackedSliceValue + case protoreflect.EnumKind: + return coderEnumPackedSliceValue + case protoreflect.Int32Kind: + return coderInt32PackedSliceValue + case protoreflect.Sint32Kind: + return coderSint32PackedSliceValue + case protoreflect.Uint32Kind: + return coderUint32PackedSliceValue + case protoreflect.Int64Kind: + return coderInt64PackedSliceValue + case protoreflect.Sint64Kind: + return coderSint64PackedSliceValue + case protoreflect.Uint64Kind: + return coderUint64PackedSliceValue + case protoreflect.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case protoreflect.Fixed32Kind: + return coderFixed32PackedSliceValue + case protoreflect.FloatKind: + return coderFloatPackedSliceValue + case protoreflect.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case protoreflect.Fixed64Kind: + return coderFixed64PackedSliceValue + case protoreflect.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case protoreflect.BoolKind: + return coderBoolValue + case protoreflect.EnumKind: + return coderEnumValue + case protoreflect.Int32Kind: + return coderInt32Value + case protoreflect.Sint32Kind: + return coderSint32Value + case protoreflect.Uint32Kind: + return coderUint32Value + case protoreflect.Int64Kind: + return coderInt64Value + case protoreflect.Sint64Kind: + return coderSint64Value + case protoreflect.Uint64Kind: + return coderUint64Value + case protoreflect.Sfixed32Kind: + return coderSfixed32Value + case protoreflect.Fixed32Kind: + return coderFixed32Value + case protoreflect.FloatKind: + return coderFloatValue + case protoreflect.Sfixed64Kind: + return coderSfixed64Value + case protoreflect.Fixed64Kind: + return coderFixed64Value + case protoreflect.DoubleKind: + return coderDoubleValue + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case 
protoreflect.BytesKind: + return coderBytesValue + case protoreflect.MessageKind: + return coderMessageValue + case protoreflect.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 000000000..757642e23 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,18 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 000000000..e06ece55a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,495 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unwrapper unwraps the value to the underlying value. +// This is implemented by List and Map. +type unwrapper interface { + protoUnwrap() any +} + +// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. +type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. + PBValueOf(reflect.Value) protoreflect.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. + GoValueOf(protoreflect.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. + IsValidPB(protoreflect.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool + + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. + New() protoreflect.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. + Zero() protoreflect.Value +} + +// NewConverter matches a Go type with a protobuf field and returns a Converter +// that converts between the two. Enums must be a named int32 kind that +// implements protoreflect.Enum, and messages must be pointer to a named +// struct type that implements protoreflect.ProtoMessage. +// +// This matcher deliberately supports a wider range of Go types than what +// protoc-gen-go historically generated to be able to automatically wrap some +// v1 messages generated by other forks of protoc-gen-go. 
+func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) + case fd.IsMap(): + return newMapConverter(t, fd) + default: + return newSingularConverter(t, fd) + } +} + +var ( + boolType = reflect.TypeOf(bool(false)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf(string("")) + bytesType = reflect.TypeOf([]byte(nil)) + byteType = reflect.TypeOf(byte(0)) +) + +var ( + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) +) + +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { + // Default isn't defined for repeated fields. + return zero + } + return fd.Default() + } + switch fd.Kind() { + case protoreflect.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case protoreflect.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case protoreflect.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case protoreflect.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case protoreflect.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case protoreflect.EnumKind: + // Handle enums, which must be a named int32 type. 
+ if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case protoreflect.MessageKind, protoreflect.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def 
protoreflect.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. 
+ s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def protoreflect.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def protoreflect.Value +} + +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() protoreflect.Value { + return c.def +} + +func (c *enumConverter) Zero() protoreflect.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) + } + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { 
+ rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() protoreflect.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. +func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 000000000..18cb96fd7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() protoreflect.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) protoreflect.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v protoreflect.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v protoreflect.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() protoreflect.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() protoreflect.Value { + return ls.conv.New() +} +func (ls *listReflect) 
IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() any { + return ls.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 000000000..304244a65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() protoreflect.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() protoreflect.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return protoreflect.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k protoreflect.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() protoreflect.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() 
any { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 000000000..cda0520c2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,285 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + depth int +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { + return o.flags&protoiface.UnmarshalDiscardUnknown != 0 +} + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == protoregistry.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: protoregistry.GlobalTypes, + depth: protowire.DefaultRecursionLimit, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + depth: in.Depth, + }) + var flags protoiface.UnmarshalOutputFlags + if out.initialized { + flags |= protoiface.UnmarshalInitialized + } + return protoiface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errDecode + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errDecode + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + } + if groupTag != 0 { + return out, errDecode + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == protoregistry.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errDecode + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. 
+ ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 000000000..febd21224 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,237 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. +func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } + } + if mi.sizecacheOffset.IsValid() { + if size > (math.MaxInt32 - 1) { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) + } else { + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? + b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) + } + } + return b, nil +} + +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. 
+ return opts.flags&piface.MarshalDeterministic == 0 +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 000000000..5f3ef5ad7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc protoreflect.EnumDescriptor +} + +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) +} +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 000000000..e31249f64 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. 
+// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. + // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. + // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType protoiface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType any + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. 
+const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() protoreflect.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() protoreflect.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v any) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + protoreflect.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 000000000..81b2b1a76 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed protoreflect.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. 
+func legacyWrapEnum(v reflect.Value) protoreflect.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(protoreflect.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(protoreflect.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. + var et protoreflect.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(protoreflect.EnumType) + } + return et +} + +type legacyEnumType struct { + desc protoreflect.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { + if e, ok := t.m.Load(n); ok { + return e.(protoreflect.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() any { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(protoreflect.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(protoreflect.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed protoreflect.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. 
+// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. +// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(protoreflect.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. + ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return protoreflect.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 000000000..9b64ad5bb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. 
+func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { + if b[0] == '"' { + var name protoreflect.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num protoreflect.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 000000000..6e8677ee6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,176 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent protoiface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. + mz := reflect.Zero(t).Interface() + if mz, ok := mz.(protoiface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == protoreflect.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case protoreflect.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case protoreflect.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs protoreflect.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. 
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L1.EditionFeatures = fd.L1.EditionFeatures + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. + if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name protoreflect.FullName + number protoreflect.FieldNumber +} + +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go 
b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 000000000..b649f1124 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + "compress/gzip" + "io" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := io.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 000000000..bf0b6049b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,572 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) protoreflect.Message { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(t, "") + return mt.MessageOf(v.Interface()) +} + +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + var hasMarshal, hasUnmarshal bool + v := reflect.Zero(t).Interface() + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= protoiface.SupportMarshalDeterministic + } + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which should be a *struct kind and must not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. + if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(protoreflect.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. 
+ mv := reflect.Zero(t).Interface() + if _, ok := mv.(protoreflect.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. + b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. 
+ for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures + // Obtain a list of oneof wrapper types. + var oneofWrappers []reflect.Type + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = protoreflect.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, 
placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. + n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + fd.L1.Options = func() protoreflect.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case protoreflect.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case protoreflect.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures + + md2.L1.IsMapEntry = true + md2.L2.Options = func() protoreflect.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var aberrantProtoMethods = &protoiface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. 
+ Flags: protoiface.SupportMarshalDeterministic, +} + +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) + } + return protoiface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + } + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { + // Check whether this supports the legacy merger. + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { + return protoiface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return protoiface.MergeOutput{} + } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { + return protoiface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return protoiface.MergeOutput{} + } + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() protoreflect.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() protoreflect.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +// Reset implements the v1 proto.Message.Reset method. 
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + +func (m aberrantMessage) ProtoReflect() protoreflect.Message { + return m +} + +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() protoreflect.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() protoreflect.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + return +} +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { + return false +} +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false +} +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { + return aberrantProtoMethods +} +func (m aberrantMessage) protoUnwrap() any { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 000000000..7e65f64f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. 
+func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return protoiface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return protoiface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv protoreflect.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return src +} + +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(protoreflect.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(protoreflect.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 000000000..8816c274d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 000000000..019399d45 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,284 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc protoreflect.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []any + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v any, i int) any + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
+func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + sizecacheType reflect.Type + weakOffset offset + weakType reflect.Type + unknownOffset offset + unknownType reflect.Type + extensionOffset offset + extensionType reflect.Type + + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genid.SizeCache_goname, genid.SizeCacheA_goname: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type + } + case genid.WeakFields_goname, genid.WeakFieldsA_goname: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type + } + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { + si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type + } + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[protoreflect.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + oneofWrappers = vs + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType any // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 000000000..ecb4623d7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,462 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo + + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. 
+ fieldTypes map[protoreflect.FieldNumber]any + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. + denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []any // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. +func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var fi fieldInfo + switch { + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Message() != nil: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + // Introduce instability to iteration order, but keep it deterministic. + if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. 
+ mi.getUnknown = func(p pointer) protoreflect.RawFields { + if p.IsNil() { + return nil + } + return *p.Apply(mi.unknownOffset).Bytes() + } + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + *p.Apply(mi.unknownOffset).Bytes() = b + } + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. + mi.getUnknown = func(p pointer) protoreflect.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: + mi.getUnknown = func(pointer) protoreflect.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { + if m == nil { + return false + } + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
+ return true + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + } + return true +} +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) +} +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xd.Type().Zero() +} +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xd.Type().New() + m.Set(xd, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// Requirements: +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. 
+type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() any { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 000000000..986322b19 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,543 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc protoreflect.FieldDescriptor + + // These fields are used for protobuf reflection support. + has func(pointer) bool + clear func(pointer) + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value +} + +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) protoreflect.Value { + return fd.Default() + }, + set: func(p pointer, v protoreflect.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) protoreflect.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() protoreflect.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() protoreflect.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. 
+ return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) protoreflect.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. + nullable = false + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType protoreflect.MessageType + 
lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) protoreflect.Value { + lazyInit() + if p.IsNil() { + return protoreflect.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return protoreflect.ValueOfMessage(messageType.Zero()) + } + return protoreflect.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v protoreflect.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) protoreflect.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return protoreflect.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() protoreflect.Message { + lazyInit() + return messageType.New() + }, + newField: func() protoreflect.Value { + lazyInit() + return protoreflect.ValueOfMessage(messageType.New()) + }, + } +} + +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber +} + +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. 
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 000000000..99dc23c6f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,271 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() any { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + mi := m.messageInfo() + mi.init() + return &mi.methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. 
+func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := mi.fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + mi.extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Has(xd) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + mi.extensionMap(m.pointer()).Clear(xd) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Get(xd) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + mi.extensionMap(m.pointer()).Set(xd, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Mutable(xd) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.newField() + } else { + return xd.Type().New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() any { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { + mi := m.messageInfo() + mi.init() + return &mi.methods +} + +// 
ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := mi.fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + mi.extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Has(xd) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + mi.extensionMap(m.pointer()).Clear(xd) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Get(xd) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + mi.extensionMap(m.pointer()).Set(xd, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return mi.extensionMap(m.pointer()).Mutable(xd) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { + return fi.newField() + } else { + return xd.Type().New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 000000000..da685e8a2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego || appengine +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer any + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v any) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) any { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 000000000..5f20ca5d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v any) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. 
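+// In this unsafe implementation the offset is a raw byte offset, so the
+// derived pointer is computed with plain pointer arithmetic.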
+func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) any { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. 
+func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. + return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 000000000..a24e6bbd7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,576 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. 
+ // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = protoregistry.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= protoiface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case protoreflect.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == protoreflect.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
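+ // For example, the third required field seen while building this
+ // MessageInfo is assigned requiredBit 1<<2; during validation these bits
+ // are OR-ed into a mask whose population count is compared against
+ // numRequiredFields.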
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case protoreflect.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case protoreflect.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case protoreflect.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case protoreflect.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case protoreflect.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case protoreflect.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
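+ // Tags that fit in one or two bytes are decoded inline as a fast path;
+ // longer tags fall back to protowire.ConsumeVarint.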
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case genid.MapEntry_Key_field_number: + vi.typ = st.keyType + case genid.MapEntry_Value_field_number: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case protoregistry.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
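+ // For example, a required int32 must arrive as VarintType, while a
+ // required message, string, or bytes field must arrive as BytesType.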
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == protoregistry.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 000000000..eb79a7ba9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
+type weakFields WeakFields + +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num protoreflect.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000..dea522e12 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od protoreflect.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if inOneof(ox) && inOneof(oy) && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. 
+ NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y protoreflect.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 000000000..a1f09162d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd protoreflect.FieldDescriptor + v protoreflect.Value +} + +var messageFieldPool = sync.Pool{ + New: func() any { return new([]messageField) }, +} + +type ( + // FieldRnger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. 
+ for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k protoreflect.MapKey + v protoreflect.Value +} + +var mapEntryPool = sync.Pool{ + New: func() any { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go new file mode 100644 index 000000000..49dc4fcd9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pragma provides types that can be embedded into a struct to +// statically enforce or prevent certain language properties. +package pragma + +import "sync" + +// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals. +type NoUnkeyedLiterals struct{} + +// DoNotImplement can be embedded in an interface to prevent trivial +// implementations of the interface. +// +// This is useful to prevent unauthorized implementations of an interface +// so that it can be extended in the future for any protobuf language changes. +type DoNotImplement interface{ ProtoInternal(DoNotImplement) } + +// DoNotCompare can be embedded in a struct to prevent comparability. +type DoNotCompare [0]func() + +// DoNotCopy can be embedded in a struct to help prevent shallow copies. +// This does not rely on a Go language feature, but rather a special case +// within the vet checker. +// +// See https://golang.org/issues/8005. +type DoNotCopy [0]sync.Mutex diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go new file mode 100644 index 000000000..d3d7f89ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/set/ints.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides simple set data structures for uint64s. +package set + +import "math/bits" + +// int64s represents a set of integers within the range of 0..63. 
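+// Each integer n in the set is represented by the bit 1<<n of the underlying uint64.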
+type int64s uint64 + +func (bs *int64s) Len() int { + return bits.OnesCount64(uint64(*bs)) +} +func (bs *int64s) Has(n uint64) bool { + return uint64(*bs)&(uint64(1)<<n) > 0 +} +func (bs *int64s) Set(n uint64) { + *(*uint64)(bs) |= uint64(1) << n +} +func (bs *int64s) Clear(n uint64) { + *(*uint64)(bs) &^= uint64(1) << n +} + +// Ints represents a set of integers within the range of 0..math.MaxUint64. +type Ints struct { + lo int64s + hi map[uint64]struct{} +} + +func (bs *Ints) Len() int { + return bs.lo.Len() + len(bs.hi) +} +func (bs *Ints) Has(n uint64) bool { + if n < 64 { + return bs.lo.Has(n) + } + _, ok := bs.hi[n] + return ok +} +func (bs *Ints) Set(n uint64) { + if n < 64 { + bs.lo.Set(n) + return + } + if bs.hi == nil { + bs.hi = make(map[uint64]struct{}) + } + bs.hi[n] = struct{}{} +} +func (bs *Ints) Clear(n uint64) { + if n < 64 { + bs.lo.Clear(n) + return + } + delete(bs.hi, n) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go new file mode 100644 index 000000000..a6e7df244 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package strs provides string manipulation functionality specific to protobuf. +package strs + +import ( + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// EnforceUTF8 reports whether to enforce strict UTF-8 validation. +func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier.
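+// For example, "range" becomes "_range" (Go keyword) and "foo-bar" becomes
+// "foo_bar" (characters that are not Unicode letters or digits map to '_').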
+func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). + r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 000000000..a1f6f3338 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego || appengine +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go new file mode 100644 index 000000000..a008acd09 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -0,0 +1,95 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. 
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 000000000..60166f2ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 000000000..dbbf1f686 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// For our release process, we enforce the following rules: +// - Tagged releases use a tag that is identical to String. 
+// - Tagged releases never reference a commit where the String +// contains "devel". +// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// Steps for tagging a new release: +// +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 34 + Patch = 2 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 000000000..3e9a6a2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. +func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. 
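+// It uses the message's fast-path CheckInitialized method when one is
+// available and otherwise falls back to a reflective walk over the message.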
+func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 000000000..d75a6534c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,296 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. 
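+//
+// A minimal call looks like the following, where wireBytes holds the encoded
+// message and *examplepb.Example stands in for any generated message type:
+//
+//	m := &examplepb.Example{}
+//	if err := Unmarshal(wireBytes, m); err != nil {
+//		// handle malformed or truncated input
+//	}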
+func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use [Unmarshal] instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + return o.unmarshal(in.Buf, in.Message) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + Depth: o.RecursionLimit, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return o.unmarshalMessageSet(b, m) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return errDecode + } + if num > protowire.MaxValidNumber { + return errDecode + } + + // Find the field descriptor for this field number. + fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. 
+ var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return errDecode + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, errDecode + } + if num > protowire.MaxValidNumber { + return 0, errDecode + } + b = b[n:] + err = errUnknown + switch num { + case genid.MapEntry_Key_field_number: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, errDecode + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. + if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. 
+var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 000000000..301eeb20f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + 
return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := 
protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 
0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 000000000..80ed16a0c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,86 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// https://protobuf.dev. +// +// For a tutorial on using protocol buffers with Go, see: +// https://protobuf.dev/getting-started/gotutorial. +// +// For a guide to generated Go protocol buffer code, see: +// https://protobuf.dev/reference/go/go-generated. +// +// # Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// - [Size] reports the size of a message in the wire format. +// +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. +// +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. +// +// # Basic message operations +// +// - [Clone] makes a deep copy of a message. +// +// - [Merge] merges the content of a message into another. +// +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. +// +// - [Reset] clears the content of a message. +// +// - [CheckInitialized] reports whether all required fields in a message are set. +// +// # Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. +// +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// # Extension accessors +// +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. 
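+//
+// For example, given a hypothetical generated string-typed extension
+// pb.E_MyOption on a proto2 message m, reading it might look like
+// (sketch only):
+//
+//	if HasExtension(m, pb.E_MyOption) {
+//		s := GetExtension(m, pb.E_MyOption).(string)
+//		// use s
+//	}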
+// +// # Related packages +// +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. +// +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. +// +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. +// +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. +// +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 000000000..1f847bcc3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,354 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. + // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. + // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. 
Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + +// Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// +// m1.OptionalBytes, _ = proto.Marshal(m2) +// +// where they expect the proto2 "optional_bytes" field to be populated +// if any only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use [Marshal] instead. 
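+//
+// For most purposes the options-based entry points above are sufficient;
+// for example, reusing a scratch buffer with deterministic output
+// (sketch only):
+//
+//	buf, err := MarshalOptions{Deterministic: true}.MarshalAppend(buf[:0], m)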
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + Flags: o.flags(), + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.marshalMessageSet(b, m) + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) 
+ return b, nil +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } + var err error + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 000000000..185dacfb4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 000000000..1a0be1b03 --- /dev/null 
+++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. +// +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 000000000..d248f2928 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,95 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { + return false + } + + // As a special-case, we reports invalid or mismatching descriptors + // as always not being populated (since they aren't). + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { + return false + } + + return mr.Has(xd) +} + +// ClearExtension clears an extension field such that subsequent +// [HasExtension] calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. 
+// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) any { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) +} + +// SetExtension stores the value of an extension field. +// It panics if m is invalid, xt does not extend m, or if type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear. + isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 000000000..3c6fe5780 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the [UnmarshalOptions.Merge] option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to a + // untyped, read-only, empty message? What about a nil dst? 
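+	//
+	// A typical call pattern, where pb.Config is a hypothetical generated
+	// message type (sketch only):
+	//
+	//	merged := Clone(defaults).(*pb.Config)
+	//	Merge(merged, overrides)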
+ + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. +func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type. + if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. 
+ src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 000000000..575d14831 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,98 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(o.size(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalMessageSetField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := o.unmarshalMessageSetField(m, num, v) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func (o UnmarshalOptions) unmarshalMessageSetField(m 
protoreflect.Message, num protowire.Number, v []byte) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 000000000..7543ee6b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. +// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. +// +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. +// +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } +var Error error + +func init() { + Error = errors.Error +} + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 000000000..465e057b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 000000000..494d6ceef --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. 
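+//
+// A build without the fast paths can be selected via this tag, for example
+// (sketch):
+//
+//	go build -tags protoreflect ./...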
+//go:build protoreflect +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 000000000..3d7f89436 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. +// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. +func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 000000000..052fb5ae3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,103 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return o.size(m.ProtoReflect()) +} + +// size is a centralized function that all size operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for size that do not go through this. +func (o MarshalOptions) size(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: o.flags(), + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. 
+ out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + Flags: o.flags(), + }) + return len(out.Buf) + } + return o.sizeMessageSlow(m) +} + +func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += o.sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return o.sizeList(num, fd, value.List()) + case fd.IsMap(): + return o.sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) + } +} + +func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return sizeTag + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += sizeTag + size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 000000000..3cf61a824 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(o.size(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, o.size(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 000000000..653b12c3a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 000000000..ea276d15a --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. +func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..8fbecb4f5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,286 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The [protoreflect.FileDescriptor] is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/editionssupport" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by [NewFile] to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by [protoregistry.Files]. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. 
+ // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. 
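+ // The syntax string selects both the descriptor syntax and the effective
+ // edition; files declaring "editions" additionally carry an explicit edition
+ // number, which is checked against the supported range below.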
+ f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. 
+ if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. + // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. 
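+//
+// An illustrative sketch of typical usage (variable names here are
+// hypothetical): a registry is commonly built from a serialized
+// FileDescriptorSet, for example one produced by protoc's
+// --descriptor_set_out flag.
+//
+//    var fdset descriptorpb.FileDescriptorSet
+//    if err := proto.Unmarshal(raw, &fdset); err != nil {
+//        // handle error
+//    }
+//    files, err := protodesc.NewFiles(&fdset)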
+func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. + delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..856175542 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,285 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, 
err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate 
up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) + if opts := xd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); 
err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..f3cebab29 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,291 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. 
+type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. 
If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
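+//
+// For example, a relative reference such as "Foo.Bar" becomes "*.Foo.Bar",
+// while a full reference such as ".fizz.buzz.Foo" becomes "fizz.buzz.Foo".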
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..6de31c2eb --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,371 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if !e.IsClosed() { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in open enums do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if isProto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), 
f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if !isProto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(file, f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if isProto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) + } + } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds 
[]*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. + if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(f, x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. 
+// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): + return errors.New("invalid under proto3 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + 
return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 000000000..804830eda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" +) + +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(editiondefaults.Defaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fsed := defaults.GetDefaults()[0] + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. + for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fsed = def + } else { + break + } + } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) + defaultsCache[ed] = fs + return fs +} + +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). 
+// Any feature set by the child overwrites what is set by the parent. +func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN + } + + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED + } + + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY + } + + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED + } + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. +func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..a5de8d400 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,274 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a +// google.protobuf.FileDescriptorProto message. 
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { + p.Syntax = proto.String(file.Syntax().String()) + } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } + return p +} + +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. + if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. 
+ if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a +// google.protobuf.MethodDescriptorProto message. 
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 000000000..d5d5af6eb --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,78 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + Depth int + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 000000000..c85bfaa5b --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,513 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
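As a quick orientation for reviewing this vendored hunk: the protodesc helpers above lower rich protoreflect descriptors into their google.protobuf.*DescriptorProto forms, and protodesc.NewFile (defined elsewhere in the same package) goes the other way. A minimal round-trip sketch, using durationpb and protoregistry.GlobalFiles purely as convenient stand-ins for any file already linked into the binary:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any generated message can hand back its parent file descriptor;
	// duration.proto is used here only because it is already linked in.
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()

	// Lower the descriptor to a google.protobuf.FileDescriptorProto,
	// as ToFileDescriptorProto above does field by field.
	fdp := protodesc.ToFileDescriptorProto(fd)
	fmt.Println(fdp.GetName(), fdp.GetSyntax())

	// Rebuild a protoreflect.FileDescriptor from the proto form,
	// resolving any imports against the global registry.
	rebuilt, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Path(), rebuilt.Messages().Len())
}
```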
+ +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// # Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The [google.golang.org/protobuf/reflect/protodesc] package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. +// +// # Go Type Descriptors +// +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to +// create Go type descriptors from protobuf descriptors. +// +// # Value Interfaces +// +// The [Enum] and [Message] interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used +// to obtain a reflective view on older messages. +// +// # Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An [EnumType] describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An [EnumDescriptor] describes an abstract protobuf enum type. +// +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. 
+// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. +// +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. +// +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. +// +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. +// +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. +package protoreflect + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/pragma" +) + +type doNotImplement pragma.DoNotImplement + +// ProtoMessage is the top-level interface that all proto messages implement. +// This is declared in the protoreflect package to avoid a cyclic dependency; +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. +type ProtoMessage interface{ ProtoReflect() Message } + +// Syntax is the language version of the proto file. +type Syntax syntax + +type syntax int8 // keep exact type opaque as the int type may change + +const ( + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 +) + +// IsValid reports whether the syntax is valid. 
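The package documentation above separates descriptors (schema information) from value interfaces (live message data). A small illustrative sketch of that split, using structpb.Value only because it is a readily available generated message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	v, err := structpb.NewValue("hello")
	if err != nil {
		panic(err)
	}

	// ProtoReflect returns the reflective view over the message value.
	m := v.ProtoReflect()

	// Range visits every populated field without knowing the concrete Go type.
	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		fmt.Printf("%s (%v) = %v\n", fd.FullName(), fd.Kind(), val.Interface())
		return true
	})

	// The descriptor side answers schema questions such as the file syntax.
	md := m.Descriptor()
	fmt.Println(md.FullName(), md.ParentFile().Syntax())
}
```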
+func (s Syntax) IsValid() bool { + switch s { + case Proto2, Proto3, Editions: + return true + default: + return false + } +} + +// String returns s as a proto source identifier (e.g., "proto2"). +func (s Syntax) String() string { + switch s { + case Proto2: + return "proto2" + case Proto3: + return "proto3" + case Editions: + return "editions" + default: + return fmt.Sprintf("<unknown:%d>", s) + } +} + +// GoString returns s as a Go source identifier (e.g., "Proto2"). +func (s Syntax) GoString() string { + switch s { + case Proto2: + return "Proto2" + case Proto3: + return "Proto3" + default: + return fmt.Sprintf("Syntax(%d)", s) + } +} + +// Cardinality determines whether a field is optional, required, or repeated. +type Cardinality cardinality + +type cardinality int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Cardinality enumeration. +const ( + Optional Cardinality = 1 // appears zero or one times + Required Cardinality = 2 // appears exactly one time; invalid with Proto3 + Repeated Cardinality = 3 // appears zero or more times +) + +// IsValid reports whether the cardinality is valid. +func (c Cardinality) IsValid() bool { + switch c { + case Optional, Required, Repeated: + return true + default: + return false + } +} + +// String returns c as a proto source identifier (e.g., "optional"). +func (c Cardinality) String() string { + switch c { + case Optional: + return "optional" + case Required: + return "required" + case Repeated: + return "repeated" + default: + return fmt.Sprintf("<unknown:%d>", c) + } +} + +// GoString returns c as a Go source identifier (e.g., "Optional"). +func (c Cardinality) GoString() string { + switch c { + case Optional: + return "Optional" + case Required: + return "Required" + case Repeated: + return "Repeated" + default: + return fmt.Sprintf("Cardinality(%d)", c) + } +} + +// Kind indicates the basic proto kind of a field. +type Kind kind + +type kind int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Field.Kind enumeration. +const ( + BoolKind Kind = 8 + EnumKind Kind = 14 + Int32Kind Kind = 5 + Sint32Kind Kind = 17 + Uint32Kind Kind = 13 + Int64Kind Kind = 3 + Sint64Kind Kind = 18 + Uint64Kind Kind = 4 + Sfixed32Kind Kind = 15 + Fixed32Kind Kind = 7 + FloatKind Kind = 2 + Sfixed64Kind Kind = 16 + Fixed64Kind Kind = 6 + DoubleKind Kind = 1 + StringKind Kind = 9 + BytesKind Kind = 12 + MessageKind Kind = 11 + GroupKind Kind = 10 +) + +// IsValid reports whether the kind is valid. +func (k Kind) IsValid() bool { + switch k { + case BoolKind, EnumKind, + Int32Kind, Sint32Kind, Uint32Kind, + Int64Kind, Sint64Kind, Uint64Kind, + Sfixed32Kind, Fixed32Kind, FloatKind, + Sfixed64Kind, Fixed64Kind, DoubleKind, + StringKind, BytesKind, MessageKind, GroupKind: + return true + default: + return false + } +} + +// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string { + switch k { + case BoolKind: + return "bool" + case EnumKind: + return "enum" + case Int32Kind: + return "int32" + case Sint32Kind: + return "sint32" + case Uint32Kind: + return "uint32" + case Int64Kind: + return "int64" + case Sint64Kind: + return "sint64" + case Uint64Kind: + return "uint64" + case Sfixed32Kind: + return "sfixed32" + case Fixed32Kind: + return "fixed32" + case FloatKind: + return "float" + case Sfixed64Kind: + return "sfixed64" + case Fixed64Kind: + return "fixed64" + case DoubleKind: + return "double" + case StringKind: + return "string" + case BytesKind: + return "bytes" + case MessageKind: + return "message" + case GroupKind: + return "group" + default: + return fmt.Sprintf("<unknown:%d>", k) + } +} + +// GoString returns k as a Go source identifier (e.g., "BoolKind"). +func (k Kind) GoString() string { + switch k { + case BoolKind: + return "BoolKind" + case EnumKind: + return "EnumKind" + case Int32Kind: + return "Int32Kind" + case Sint32Kind: + return "Sint32Kind" + case Uint32Kind: + return "Uint32Kind" + case Int64Kind: + return "Int64Kind" + case Sint64Kind: + return "Sint64Kind" + case Uint64Kind: + return "Uint64Kind" + case Sfixed32Kind: + return "Sfixed32Kind" + case Fixed32Kind: + return "Fixed32Kind" + case FloatKind: + return "FloatKind" + case Sfixed64Kind: + return "Sfixed64Kind" + case Fixed64Kind: + return "Fixed64Kind" + case DoubleKind: + return "DoubleKind" + case StringKind: + return "StringKind" + case BytesKind: + return "BytesKind" + case MessageKind: + return "MessageKind" + case GroupKind: + return "GroupKind" + default: + return fmt.Sprintf("Kind(%d)", k) + } +} + +// FieldNumber is the field number in a message. +type FieldNumber = protowire.Number + +// FieldNumbers represent a list of field numbers. +type FieldNumbers interface { + // Len reports the number of fields in the list. + Len() int + // Get returns the ith field number. It panics if out of bounds. + Get(i int) FieldNumber + // Has reports whether n is within the list of fields. + Has(n FieldNumber) bool + + doNotImplement +} + +// FieldRanges represent a list of field number ranges. +type FieldRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]FieldNumber // start inclusive; end exclusive + // Has reports whether n is within any of the ranges. + Has(n FieldNumber) bool + + doNotImplement +} + +// EnumNumber is the numeric value for an enum. +type EnumNumber int32 + +// EnumRanges represent a list of enum number ranges. +type EnumRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether s is a syntactically valid name. +// An empty name is invalid. +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list.
+ Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each [Name]. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether s is a syntactically valid full name. +// An empty full name is invalid. +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the [Name] itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. +func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 000000000..0b9942885 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,129 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "strconv" +) + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. + Get(int) SourceLocation + + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation + + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. 
+type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. + Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// Equal reports whether p1 equals p2. +func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000..ea154eec4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,573 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
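The SourcePath helpers above, together with the generated append* functions that follow, turn a numeric descriptor path into a readable string. A small sketch of the documented behavior:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// A SourcePath alternates descriptor.proto field numbers with indexes
	// into repeated fields: FileDescriptorProto field 4 ("message_type"),
	// element 0, then DescriptorProto field 2 ("field"), element 1.
	p := protoreflect.SourcePath{4, 0, 2, 1}

	// String walks the path through the generated append* helpers.
	fmt.Println(p.String()) // .message_type[0].field[1]
}
```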
+ +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + case 14: + b = p.appendSingularField(b, "edition", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = 
p.appendSingularField(b, "extendee", nil) + case 7: + b = p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } 
+ return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) 
== 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "verification", nil) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = 
p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 000000000..cd8fadbaf --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,672 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
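For orientation, the Descriptor contract documented above (descriptors are comparable, ParentFile support is optional, names are fully qualified) supports small generic helpers such as the following sketch; both function names are illustrative and not part of the vendored package:

```
package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// sameProtoType reports whether two descriptors plausibly describe the same
// proto type: identical descriptor values always match, and otherwise the
// fully-qualified names are compared (covering dynamically built descriptors).
func sameProtoType(d1, d2 protoreflect.Descriptor) bool {
	return d1 == d2 || d1.FullName() == d2.FullName()
}

// whereDeclared renders "path/to/file.proto: full.Name", falling back to the
// full name alone because ParentFile support is optional and may return nil.
func whereDeclared(d protoreflect.Descriptor) string {
	if pf := d.ParentFile(); pf != nil {
		return fmt.Sprintf("%s: %s", pf.Path(), d.FullName())
	}
	return string(d.FullName())
}
```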
+// +// Nested declarations: +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message] value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// [MessageFieldTypes] interface. +type MessageType interface { + // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. + New() Message + + // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageFieldTypes extends a [MessageType] by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + +// MessageDescriptors is a list of message declarations. 
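As a sketch of how the MessageDescriptor and FieldDescriptor accessors above compose in practice (the helper name is illustrative, not part of this package):

```
package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// describeFields prints one line per field declared directly on md, using
// only the accessors from the protoreflect API vendored here.
func describeFields(md protoreflect.MessageDescriptor) {
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Printf("%s (#%d): kind=%v list=%t map=%t\n",
			fd.FullName(), fd.Number(), fd.Kind(), fd.IsList(), fd.IsMap())
	}
}
```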
+type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. + HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. + JSONName() string + + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. 
+ // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with FieldDescriptor.HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. 
+ Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. + Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(any) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) any + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(any) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// [EnumValueDescriptor]. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. 
+type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. +type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. + ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: [MethodDescriptor]. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. 
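A small sketch of the EnumDescriptor/EnumValueDescriptor lookup pattern described above (the helper name is illustrative; the fallback covers numbers that are not declared, which can happen with open enums):

```
package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// enumValueName returns the declared name for an enum number, falling back
// to the raw number when no value with that number is declared.
func enumValueName(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string {
	if vd := ed.Values().ByNumber(n); vd != nil {
		return string(vd.Name())
	}
	return fmt.Sprintf("%d", n)
}
```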
+type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. + ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 000000000..a7b0d06ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. +// +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and +// be within the parent's extension range. +// +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. 
+ // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. + // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. 
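As an illustration of the Get/Set contract above combined with the FieldDescriptors.ByName lookup from type.go, a minimal sketch (setStringField is a hypothetical helper, not part of the package):

```
package example

import "google.golang.org/protobuf/reflect/protoreflect"

// setStringField sets a top-level singular string field by name and reports
// whether a matching field was found. Set panics on type mismatch, so the
// kind and cardinality are checked before calling it.
func setStringField(m protoreflect.Message, name protoreflect.Name, s string) bool {
	fd := m.Descriptor().Fields().ByName(name)
	if fd == nil || fd.Kind() != protoreflect.StringKind || fd.IsList() || fd.IsMap() {
		return false
	}
	m.Set(fd, protoreflect.ValueOfString(s))
	return true
}
```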
+ // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool + + // ProtoMethods returns optional fast-path implementations of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // google.golang.org/protobuf/runtime/protoiface.Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. + // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. 
+ // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 000000000..654599d44 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. +// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since [Value] does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - [Message] values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - [List] values are equal if they are the same length and +// each corresponding element is equal. +// +// - [Map] values are equal if they have the same set of keys and +// the corresponding value for each key is equal. 
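The aliasing rules for List spelled out above are easiest to see with Mutable, which is guaranteed to reference the message's own storage, so no follow-up Set is needed. A sketch (appendInt32s is illustrative; fd must describe a repeated int32 field or the calls panic):

```
package example

import "google.golang.org/protobuf/reflect/protoreflect"

// appendInt32s appends values to a repeated int32 field in place. Mutable
// returns a List that aliases m, so the appends are visible immediately.
func appendInt32s(m protoreflect.Message, fd protoreflect.FieldDescriptor, xs ...int32) {
	list := m.Mutable(fd).List()
	for _, x := range xs {
		list.Append(protoreflect.ValueOfInt32(x))
	}
}
```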
+func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 000000000..75f83a2af --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
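Given the Equal method and helpers above, a per-field comparison reduces to a one-liner; fieldEqual is an illustrative helper, not part of the vendored package:

```
package example

import "google.golang.org/protobuf/reflect/protoreflect"

// fieldEqual reports whether fd holds an equal value in both messages under
// Value.Equal semantics: NaN equals NaN, nil and empty bytes are equal, and
// message, list, and map values are compared recursively.
func fieldEqual(a, b protoreflect.Message, fd protoreflect.FieldDescriptor) bool {
	return a.Get(fd).Equal(b.Get(fd))
}
```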
+ +//go:build purego || appengine +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v any) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() any { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 000000000..9fe83cef5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,438 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto [Kind]: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, +// but use different integer encoding methods. +// +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. +// For example, [ValueOf]("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +// +// [List], [Map], and [Message] Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as [Message.Mutable], +// always references the source object. +// +// For example: +// +// // Append a 0 to a "repeated int32" field. 
+// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).List() +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as [Message.Get], may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. +type Value value + +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. + +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v any) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + case ProtoMessage: + panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. +func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. 
+func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. +func ValueOfMap(v Map) Value { + return valueOfIface(v) +} + +// IsValid reports whether v is populated with a value. +func (v Value) IsValid() bool { + return v.typ != nilType +} + +// Interface returns v as an any. +// +// Invariant: v == ValueOf(v).Interface() +func (v Value) Interface() any { + switch v.typ { + case nilType: + return nil + case boolType: + return v.Bool() + case int32Type: + return int32(v.Int()) + case int64Type: + return int64(v.Int()) + case uint32Type: + return uint32(v.Uint()) + case uint64Type: + return uint64(v.Uint()) + case float32Type: + return float32(v.Float()) + case float64Type: + return float64(v.Float()) + case stringType: + return v.String() + case bytesType: + return v.Bytes() + case enumType: + return v.Enum() + default: + return v.getIface() + } +} + +func (v Value) typeName() string { + switch v.typ { + case nilType: + return "nil" + case boolType: + return "bool" + case int32Type: + return "int32" + case int64Type: + return "int64" + case uint32Type: + return "uint32" + case uint64Type: + return "uint64" + case float32Type: + return "float32" + case float64Type: + return "float64" + case stringType: + return "string" + case bytesType: + return "bytes" + case enumType: + return "enum" + default: + switch v := v.getIface().(type) { + case Message: + return "message" + case List: + return "list" + case Map: + return "map" + default: + return fmt.Sprintf("", v) + } + } +} + +func (v Value) panicMessage(what string) string { + return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what) +} + +// Bool returns v as a bool and panics if the type is not a bool. +func (v Value) Bool() bool { + switch v.typ { + case boolType: + return v.num > 0 + default: + panic(v.panicMessage("bool")) + } +} + +// Int returns v as a int64 and panics if the type is not a int32 or int64. +func (v Value) Int() int64 { + switch v.typ { + case int32Type, int64Type: + return int64(v.num) + default: + panic(v.panicMessage("int")) + } +} + +// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64. +func (v Value) Uint() uint64 { + switch v.typ { + case uint32Type, uint64Type: + return uint64(v.num) + default: + panic(v.panicMessage("uint")) + } +} + +// Float returns v as a float64 and panics if the type is not a float32 or float64. +func (v Value) Float() float64 { + switch v.typ { + case float32Type, float64Type: + return math.Float64frombits(uint64(v.num)) + default: + panic(v.panicMessage("float")) + } +} + +// String returns v as a string. Since this method implements [fmt.Stringer], +// this returns the formatted string value for any non-string type. 
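A short sketch of the ValueOf/Interface round trip guaranteed by the constructors above; the typed accessors are strict and panic on mismatch (the function is illustrative):

```
package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// roundTrip shows that Interface returns the same dynamic type given to
// ValueOf, while the typed accessors (Int, String, ...) require it.
func roundTrip() {
	v := protoreflect.ValueOf(int64(42))
	n := v.Interface().(int64) // same dynamic type back out
	fmt.Println(v.Int(), n)    // 42 42
}
```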
+func (v Value) String() string { + switch v.typ { + case stringType: + return v.getString() + default: + return fmt.Sprint(v.Interface()) + } +} + +// Bytes returns v as a []byte and panics if the type is not a []byte. +func (v Value) Bytes() []byte { + switch v.typ { + case bytesType: + return v.getBytes() + default: + panic(v.panicMessage("bytes")) + } +} + +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. +func (v Value) Enum() EnumNumber { + switch v.typ { + case enumType: + return EnumNumber(v.num) + default: + panic(v.panicMessage("enum")) + } +} + +// Message returns v as a [Message] and panics if the type is not a [Message]. +func (v Value) Message() Message { + switch vi := v.getIface().(type) { + case Message: + return vi + default: + panic(v.panicMessage("message")) + } +} + +// List returns v as a [List] and panics if the type is not a [List]. +func (v Value) List() List { + switch vi := v.getIface().(type) { + case List: + return vi + default: + panic(v.panicMessage("list")) + } +} + +// Map returns v as a [Map] and panics if the type is not a [Map]. +func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). +// The following shows what Go type is used to represent each proto [Kind]: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a [Value]: +// +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an any. +func (k MapKey) Interface() any { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements [fmt.Stringer], +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a [Value]. 
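Tying the Map interface from value.go to the MapKey accessors above, a sketch of collecting the keys of a string-keyed map field (stringKeys is an illustrative helper):

```
package example

import "google.golang.org/protobuf/reflect/protoreflect"

// stringKeys returns the keys of a string-keyed map field, or nil if fd does
// not describe such a field.
func stringKeys(m protoreflect.Message, fd protoreflect.FieldDescriptor) []string {
	if !fd.IsMap() || fd.MapKey().Kind() != protoreflect.StringKind {
		return nil
	}
	var keys []string
	m.Get(fd).Map().Range(func(k protoreflect.MapKey, _ protoreflect.Value) bool {
		keys = append(keys, k.String())
		return true
	})
	return keys
}
```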
+func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go new file mode 100644 index 000000000..7f3583ead --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t any) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v any) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x any) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 000000000..f7d386990 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t any) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v any) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x any) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 000000000..de1777339 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,882 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The [Files] registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. +// [Files] only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. +// +// The [Types] registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "os" + "strings" + "sync" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. +var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. 
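+// It is typically populated by the init functions of generated .pb.go packages.
+// A lookup might look like this (a sketch; the name resolves only when the
+// corresponding generated package is linked into the binary):
+//
+//	d, err := GlobalFiles.FindDescriptorByName("google.protobuf.Duration")
+//	if err != nil {
+//		// err is NotFound if nothing is registered under that name.
+//	}
+//	_ = d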
+var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]any + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. +// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. +func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]any{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; len(prev) > 0 { + r.checkGenProtoConflict(path) + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err + } + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ + return nil +} + +// Several well-known types were hosted in the 
google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). +func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, [NotFound]) if not found. +func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix 
string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, [NotFound]) if not found. +// This returns an error if multiple files have the same path. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) + } +} + +// NumFiles reports the number of registered files, +// including duplicate files with the same name. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numFiles +} + +// RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. +func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The [Types] type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. 
+ FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. + FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The [Types] type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. +type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]any + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. 
+ xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, [NotFound]) if not found. +func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". +// +// This returns (nil, [NotFound]) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, [NotFound]) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. 
+// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, [NotFound]) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. + if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, [NotFound]) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. +func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. 
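+// For example, counting every registered extension (a sketch; equivalent to
+// calling NumExtensions):
+//
+//	n := 0
+//	GlobalTypes.RangeExtensions(func(protoreflect.ExtensionType) bool {
+//		n++
+//		return true
+//	})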
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t any) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr any) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v any) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 000000000..c58727675 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 000000000..44cf467d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. 
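+//
+// For example, ordinary callers encode through the proto package rather than
+// invoking these fast-path methods directly (a sketch; m is assumed to be a
+// proto.Message):
+//
+//	b, err := proto.Marshal(m)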
+package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. +type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + Depth int +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. 
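+// For example, a caller of a fast-path Unmarshal can check whether the message
+// was proven initialized (a sketch; out is assumed to be an UnmarshalOutput):
+//
+//	initialized := out.Flags&UnmarshalInitialized != 0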
+type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 000000000..4a1ab7fb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. + EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 000000000..a105cb23e --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,60 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 000000000..9403eb075 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,5825 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 + // Placeholder editions for testing feature resolution. These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 +) + +// Enum value maps for Edition. 
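+// For example (a sketch of how the generated name/value maps are used):
+//
+//	name := Edition_name[int32(Edition_EDITION_2023)] // "EDITION_2023"
+//	num := Edition(Edition_value["EDITION_PROTO3"])   // Edition_EDITION_PROTO3 (999)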
+var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_2024": 1001, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +// The verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. 
+func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported after google.protobuf. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. 
+var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 +) + +// Enum value maps for FieldDescriptorProto_Label. +var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. 
+var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. 
+var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[7] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. 
+var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. +var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[9] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. + GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. +var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file. 
+ Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +func (x *FileDescriptorProto) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +// Describes a message type. 
+type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. 
+func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` + // The verification state of the range. + // TODO: flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. 
This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. +type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. +const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. 
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. 
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. +func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func (x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. 
+ // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // + // map map_field = 1; + // + // The parsed descriptor looks like: + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. 
This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. 
+ DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. +const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. 
+func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { + if x != nil { + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
+func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. +func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. +const ( + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. 
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. +const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { + if x != nil { + return x.Defaults + } + return nil +} + +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // + // message Foo { + // optional string foo = 1; + // } + // + // Let's look at just the field definition: + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // + // We have the following locations: + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). 
This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
+} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. +func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. 
+ EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. 
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { + if x != nil { + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures + } + return nil +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition appears. 
+ // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // + // refers to: + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // + // This is because FileDescriptorProto.message_type has field number 4: + // + // repeated DescriptorProto message_type = 4; + // + // and DescriptorProto.field has field number 2: + // + // repeated FieldDescriptorProto field = 2; + // + // and FieldDescriptorProto.name has field number 1: + // + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // + // [ 4, 3, 2, 7 ] + // + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified object. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 
0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 
0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 
0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 
0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 
0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 
0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 
0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 
0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 
0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 
0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 
0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 
0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 
0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 
0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 
0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 
0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: 
google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
+ (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
+ (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
+ (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
+ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
+ (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
+ (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 27: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
+ (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation
+}
+var file_google_protobuf_descriptor_proto_depIdxs = []int32{
+ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
+ 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 15, // 62: google.protobuf.FeatureSet.json_format:type_name ->
google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v 
any, i int) any { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + 
default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRangeOptions_Declaration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 17, + NumMessages: 33, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 000000000..a2ca940c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,181 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/go_features.proto + +package gofeaturespb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. 
+func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "pb.go", + Tag: "bytes,1002,opt,name=go", + Filename: "google/protobuf/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. +var ( + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] +) + +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +} + +var ( + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc +) + +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + 
file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + }) + return file_google_protobuf_go_features_proto_rawDescData +} + +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, // 1: pb.go:type_name -> pb.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, + }.Build() + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 000000000..7172b43d3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,496 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// # Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// # Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// # Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... 
// make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +package anypb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + strings "strings" + sync "sync" +) + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). 
+ // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // - If no scheme is provided, `https` is assumed. + // - An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // - Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. +func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. 
+func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. +func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
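+// Illustrative sketch (added in this review, not part of the generated file):
+// it exercises the helpers defined above, packing a concrete message into an
+// Any and recovering it. durationpb is used as the payload type purely as an
+// example; fmt, log, and time are assumed to be imported:
+//
+//	a, err := anypb.New(durationpb.New(5 * time.Second))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(a.MessageName())                          // google.protobuf.Duration
+//	fmt.Println(a.MessageIs((*durationpb.Duration)(nil))) // true
+//
+//	d := new(durationpb.Duration)
+//	if err := a.UnmarshalTo(d); err != nil { // exact concrete type known
+//		log.Fatal(err)
+//	}
+//	m, err := a.UnmarshalNew() // type resolved via the global registry
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(m.(*durationpb.Duration).AsDuration()) // 5s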
+func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []any{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: 
file_google_protobuf_any_proto_depIdxs, + MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 000000000..1b71bcd91 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,374 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// # Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... // make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... 
// handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// # Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. 
Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. +func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
Duration.ProtoReflect.Descriptor instead. +func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []any{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 000000000..d87b4fb82 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,166 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. 
For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []any{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go new file mode 100644 index 000000000..ac1e91bb6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -0,0 +1,588 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. +// +// The FieldMask message represents a set of symbolic field paths. +// The paths are specific to some target message type, +// which is not stored within the FieldMask message itself. +// +// # Constructing a FieldMask +// +// The New function is used construct a FieldMask: +// +// var messageType *descriptorpb.DescriptorProto +// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") +// if err != nil { +// ... // handle error +// } +// ... 
// make use of fm +// +// The "field.name" and "field.number" paths are valid paths according to the +// google.protobuf.DescriptorProto message. Use of a path that does not correlate +// to valid fields reachable from DescriptorProto would result in an error. +// +// Once a FieldMask message has been constructed, +// the Append method can be used to insert additional paths to the path set: +// +// var messageType *descriptorpb.DescriptorProto +// if err := fm.Append(messageType, "options"); err != nil { +// ... // handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. +package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. 
If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. 
+// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. +func New(m proto.Message, paths ...string) (*FieldMask, error) { + x := new(FieldMask) + return x, x.Append(m, paths...) +} + +// Union returns the union of all the paths in the input field masks. +func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var out []string + out = append(out, mx.GetPaths()...) + out = append(out, my.GetPaths()...) + for _, m := range ms { + out = append(out, m.GetPaths()...) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// Intersect returns the intersection of all the paths in the input field masks. +func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var ss1, ss2 []string // reused buffers for performance + intersect := func(out, in []string) []string { + ss1 = normalizePaths(append(ss1[:0], in...)) + ss2 = normalizePaths(append(ss2[:0], out...)) + out = out[:0] + for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { + switch s1, s2 := ss1[i1], ss2[i2]; { + case hasPathPrefix(s1, s2): + out = append(out, s1) + i1++ + case hasPathPrefix(s2, s1): + out = append(out, s2) + i2++ + case lessPath(s1, s2): + i1++ + case lessPath(s2, s1): + i2++ + } + } + return out + } + + out := Union(mx, my, ms...).GetPaths() + out = intersect(out, mx.GetPaths()) + out = intersect(out, my.GetPaths()) + for _, m := range ms { + out = intersect(out, m.GetPaths()) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// IsValid reports whether all the paths are syntactically valid and +// refer to known fields in the specified message type. +// It reports false for a nil FieldMask. +func (x *FieldMask) IsValid(m proto.Message) bool { + paths := x.GetPaths() + return x != nil && numValidPaths(m, paths) == len(paths) +} + +// Append appends a list of paths to the mask and verifies that each one +// is valid according to the specified message type. +// An invalid path is not appended and breaks insertion of subsequent paths. +func (x *FieldMask) Append(m proto.Message, paths ...string) error { + numValid := numValidPaths(m, paths) + x.Paths = append(x.Paths, paths[:numValid]...) + paths = paths[numValid:] + if len(paths) > 0 { + name := m.ProtoReflect().Descriptor().FullName() + return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) + } + return nil +} + +func numValidPaths(m proto.Message, paths []string) int { + md0 := m.ProtoReflect().Descriptor() + for i, path := range paths { + md := md0 + if !rangeFields(path, func(field string) bool { + // Search the field within the message. + if md == nil { + return false // not within a message + } + fd := md.Fields().ByName(protoreflect.Name(field)) + // The real field name of a group is the message name. 
+ if fd == nil { + gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { + fd = gd + } + } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { + fd = nil + } + if fd == nil { + return false // message has does not have this field + } + + // Identify the next message to search within. + md = fd.Message() // may be nil + + // Repeated fields are only allowed at the last position. + if fd.IsList() || fd.IsMap() { + md = nil + } + + return true + }) { + return i + } + } + return len(paths) +} + +// Normalize converts the mask to its canonical form where all paths are sorted +// and redundant paths are removed. +func (x *FieldMask) Normalize() { + x.Paths = normalizePaths(x.Paths) +} + +func normalizePaths(paths []string) []string { + sort.Slice(paths, func(i, j int) bool { + return lessPath(paths[i], paths[j]) + }) + + // Elide any path that is a prefix match on the previous. + out := paths[:0] + for _, path := range paths { + if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { + continue + } + out = append(out, path) + } + return out +} + +// hasPathPrefix is like strings.HasPrefix, but further checks for either +// an exact matche or that the prefix is delimited by a dot. +func hasPathPrefix(path, prefix string) bool { + return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') +} + +// lessPath is a lexicographical comparison where dot is specially treated +// as the smallest symbol. +func lessPath(x, y string) bool { + for i := 0; i < len(x) && i < len(y); i++ { + if x[i] != y[i] { + return (x[i] - '.') < (y[i] - '.') + } + } + return len(x) < len(y) +} + +// rangeFields is like strings.Split(path, "."), but avoids allocations by +// iterating over each field in place and calling a iterator function. +func rangeFields(path string, f func(field string) bool) bool { + for { + var field string + if i := strings.IndexByte(path, '.'); i >= 0 { + field, path = path[:i], path[i:] + } else { + field, path = path, "" + } + + if !f(field) { + return false + } + + if len(path) == 0 { + return true + } + path = strings.TrimPrefix(path, ".") + } +} + +func (x *FieldMask) Reset() { + *x = FieldMask{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMask) ProtoMessage() {} + +func (x *FieldMask) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead. 
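+// Illustrative sketch (added in this review, not part of the generated file):
+// it shows New, Append, Union, Intersect, and IsValid working against
+// descriptorpb.FieldDescriptorProto, chosen only as a convenient message type
+// with well-known fields; fmt and log are assumed to be imported:
+//
+//	md := &descriptorpb.FieldDescriptorProto{}
+//	fm, err := fieldmaskpb.New(md, "name", "number")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := fm.Append(md, "options.packed"); err != nil {
+//		log.Fatal(err)
+//	}
+//	other := &fieldmaskpb.FieldMask{Paths: []string{"name", "type"}}
+//	fmt.Println(fieldmaskpb.Union(fm, other).GetPaths())     // [name number options.packed type]
+//	fmt.Println(fieldmaskpb.Intersect(fm, other).GetPaths()) // [name]
+//	fmt.Println(fm.IsValid(md))                              // true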
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []any{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*FieldMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 000000000..83a5a645b --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,383 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// # Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... 
// handle error +// } +// +// # Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. 
A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +// ) to obtain a formatter capable of generating timestamps in this format. +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []any{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + 
File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 000000000..e473f826a --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,760 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. 
+// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. 
+func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. +func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. 
+func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. +func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. +func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bool stores v in a new BoolValue and returns a pointer to it. 
+func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// String stores v in a new StringValue and returns a pointer to it. +func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. 
+func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + +func (x *BytesValue) Reset() { + *x = BytesValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BytesValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesValue) ProtoMessage() {} + +func (x *BytesValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead. +func (*BytesValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8} +} + +func (x *BytesValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor + +var file_google_protobuf_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, + 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_wrappers_proto_rawDescOnce sync.Once + file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc +) + +func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { + file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + }) + return file_google_protobuf_wrappers_proto_rawDescData +} + +var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_google_protobuf_wrappers_proto_goTypes = []any{ + (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue + (*FloatValue)(nil), // 1: google.protobuf.FloatValue + (*Int64Value)(nil), // 2: google.protobuf.Int64Value + (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value + (*Int32Value)(nil), // 4: google.protobuf.Int32Value + (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*BoolValue)(nil), // 6: google.protobuf.BoolValue + (*StringValue)(nil), // 7: google.protobuf.StringValue + (*BytesValue)(nil), // 8: google.protobuf.BytesValue +} +var file_google_protobuf_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_wrappers_proto_init() } +func file_google_protobuf_wrappers_proto_init() { + if File_google_protobuf_wrappers_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DoubleValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*FloatValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*Int64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*UInt64Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { + 
switch v := v.(*Int32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*UInt32Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*BoolValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*StringValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*BytesValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_wrappers_proto_goTypes, + DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs, + MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, + }.Build() + File_google_protobuf_wrappers_proto = out.File + file_google_protobuf_wrappers_proto_rawDesc = nil + file_google_protobuf_wrappers_proto_goTypes = nil + file_google_protobuf_wrappers_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/gopkg.in/ini.v1/.editorconfig new file mode 100644 index 000000000..4a2d9180f --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore new file mode 100644 index 000000000..588388bda --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 000000000..631e36925 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE new file mode 100644 index 000000000..d361bbcdf --- /dev/null +++ b/vendor/gopkg.in/ini.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile new file mode 100644 index 000000000..f3b0dae2d --- /dev/null +++ b/vendor/gopkg.in/ini.v1/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -test.bench=. -test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md new file mode 100644 index 000000000..30606d970 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/README.md @@ -0,0 +1,43 @@ +# INI + +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +## Features + +- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +The minimum requirement of Go is **1.13**. + +```sh +$ go get gopkg.in/ini.v1 +``` + +Please add `-u` flag to update in the future. + +## Getting Help + +- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml new file mode 100644 index 000000000..e02ec84bc --- /dev/null +++ b/vendor/gopkg.in/ini.v1/codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true + +comment: + layout: 'diff' + +github_checks: false diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go new file mode 100644 index 000000000..c3a541f1d --- /dev/null +++ b/vendor/gopkg.in/ini.v1/data_source.go @@ -0,0 +1,76 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" +) + +var ( + _ dataSource = (*sourceFile)(nil) + _ dataSource = (*sourceData)(nil) + _ dataSource = (*sourceReadCloser)(nil) +) + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type %q", s) + } +} diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go new file mode 100644 index 000000000..48b8e66d6 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/deprecated.go @@ -0,0 +1,22 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +var ( + // Deprecated: Use "DefaultSection" instead. + DEFAULT_SECTION = DefaultSection + // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore = SnackCase +) diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go new file mode 100644 index 000000000..f66bc94b8 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/error.go @@ -0,0 +1,49 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. 
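For illustration, a minimal sketch of how parseDataSource's accepted source kinds (a file path string, raw `[]byte`, an `io.ReadCloser`, or a plain `io.Reader`) surface through the package's Load entry point defined later in ini.go; the error check uses the IsErrDelimiterNotFound helper from error.go, and Section/Key come from files outside this hunk:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Each argument may be a different kind of source; later ones overwrite
	// earlier ones. A plain file path string would be accepted here as well.
	cfg, err := ini.Load(
		[]byte("timeout = 30s"),               // []byte    -> sourceData
		bytes.NewBufferString("debug = true"), // io.Reader -> wrapped in a NopCloser
	)
	if err != nil {
		if ini.IsErrDelimiterNotFound(err) {
			log.Fatalf("line without a key-value delimiter: %v", err)
		}
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("").Key("debug").String()) // true
}
```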
+type ErrDelimiterNotFound struct { + Line string +} + +// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. +func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go new file mode 100644 index 000000000..f8b22408b --- /dev/null +++ b/vendor/gopkg.in/ini.v1/file.go @@ -0,0 +1,541 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of one or more INI files in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // To keep track of the index of a section with same name. + // This meta list is only used with non-unique section names are allowed. + sectionIndexes []int + + // Actual data is stored here. + sections map[string][]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } + if len(opts.KeyValueDelimiterOnWrite) == 0 { + opts.KeyValueDelimiterOnWrite = "=" + } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } + + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string][]*Section), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty(opts ...LoadOptions) *File { + var opt LoadOptions + if len(opts) > 0 { + opt = opts[0] + } + + // Ignore error here, we are sure our data is good. + f, _ := LoadSources(opt, []byte("")) + return f +} + +// NewSection creates a new section. 
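A short usage sketch for the section API that follows, building a file in memory with Empty and NewSection and persisting it with SaveTo; Section.Key comes from section.go, which is not shown in this hunk, and the output file name is a placeholder:

```go
package main

import (
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	f := ini.Empty()

	sec, err := f.NewSection("server")
	if err != nil {
		log.Fatal(err)
	}
	sec.Comment = "HTTP listener settings"

	// Section.Key creates the key on first access; SetValue appears further
	// down in key.go.
	sec.Key("host").SetValue("127.0.0.1")
	sec.Key("port").SetValue("8080")

	if err := f.SaveTo("server.ini"); err != nil {
		log.Fatal(err)
	}
}
```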
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. 
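To make the lookup semantics above concrete, a small sketch contrasting GetSection (returns an error for a missing name) with Section (never fails, creating an empty section on demand):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nhost = 127.0.0.1\n\n[client]\nretries = 3\n"))
	if err != nil {
		log.Fatal(err)
	}

	// GetSection reports an error for a missing section; Section quietly
	// creates an empty one instead.
	if _, err := cfg.GetSection("database"); err != nil {
		fmt.Println(err)
	}

	fmt.Println(cfg.HasSection("server"))                         // true
	fmt.Println(cfg.SectionStrings())                             // [DEFAULT server client]
	fmt.Println(cfg.Section("client").Key("retries").MustInt(1)) // 3
}
```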
+func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. +func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + 
if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/gopkg.in/ini.v1/helper.go b/vendor/gopkg.in/ini.v1/helper.go new file mode 100644 index 000000000..f9d80a682 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go new file mode 100644 index 000000000..99e7f8651 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. 
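A brief sketch of the write path shown above (WriteTo, SaveToIndent) together with the PrettyFormat switch declared just below in ini.go; the file name is a placeholder:

```go
package main

import (
	"log"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[paths]\ndata = /var/lib/app\ncache = /var/cache/app\n"))
	if err != nil {
		log.Fatal(err)
	}

	// PrettyFormat aligns "=" signs on output; turning it off produces the
	// compact form.
	ini.PrettyFormat = false

	if _, err := cfg.WriteTo(os.Stdout); err != nil { // any io.Writer
		log.Fatal(err)
	}
	if err := cfg.SaveToIndent("app.ini", "\t"); err != nil { // write to disk
		log.Fatal(err)
	}
}
```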
+package ini + +import ( + "os" + "regexp" + "runtime" + "strings" +) + +const ( + // Maximum allowed depth when recursively substituing variable names. + depthValues = 99 +) + +var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + + // LineBreak is the delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) + + // DefaultHeader explicitly writes default section header. + DefaultHeader = false + + // PrettySection indicates whether to put a line between sections. + PrettySection = true + // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. + PrettyEqual = false + // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatLeft = "" + // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatRight = "" +) + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +func init() { + if runtime.GOOS == "windows" && !inTest { + LineBreak = "\r\n" + } +} + +// LoadOptions contains all customized options used for load data source(s). +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. + SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // AllowNestedValues indicates whether to allow AWS-like nested values. + // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values + AllowNestedValues bool + // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. + // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure + // Relevant quote: Values can also span multiple lines, as long as they are indented deeper + // than the first line of the value. 
+ AllowPythonMultilineValues bool + // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. + // Docs: https://docs.python.org/2/library/configparser.html + // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. + // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. + SpaceBeforeInlineComment bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format + // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool + // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format + // when value is NOT surrounded by any quotes. + // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. + UnescapeValueCommentSymbols bool + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string + // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". + KeyValueDelimiters string + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". + KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string + // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). + PreserveSurroundedQuote bool + // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). + DebugFunc DebugFunc + // ReaderBufferSize is the buffer size of the reader in bytes. + ReaderBufferSize int + // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. + AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. + AllowDuplicateShadowValues bool +} + +// DebugFunc is the type of function called to log parse events. +type DebugFunc func(message string) + +// LoadSources allows caller to apply customized options for loading from data source(s). +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) 
+} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// ShadowLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go new file mode 100644 index 000000000..a19d9f38e --- /dev/null +++ b/vendor/gopkg.in/ini.v1/key.go @@ -0,0 +1,837 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + nestedValues []string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { + return nil + } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +// AddNestedValue adds a nested value to the key. +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. 
Shadow +// keys with empty values are ignored from the returned list. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } + return []string{k.value} + } + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < depthValues; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + // If not found or found the key itself, then search again in default section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + nk, _ = k.s.f.Section("").GetKey(noption) + if nk == nil { + // Stop when no results found in the default section, + // and returns the value as-is. + break + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. 
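transformValue above implements the %(name)s substitution syntax; a minimal sketch of what that looks like from the caller's side:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte(`
root = /srv/app

[paths]
logs = %(root)s/logs
`)
	cfg, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	// %(name)s is resolved first against the same section, then against the
	// default section, up to depthValues levels deep.
	fmt.Println(cfg.Section("paths").Key("logs").String()) // /srv/app/logs
}
```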
+func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
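The Must* accessors above never return an error; a compact sketch of the fallback behavior:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nport = not-a-number\n"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("server")

	// Must* falls back to the supplied default on a parse failure and also
	// writes that default back into the key.
	fmt.Println(sec.Key("port").MustInt(8080))                    // 8080
	fmt.Println(sec.Key("timeout").MustDuration(5 * time.Second)) // 5s
	fmt.Println(sec.Key("debug").MustBool(false))                 // false
}
```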
+func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. 
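A quick sketch of the In* and Range* validators defined above:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("level = verbose\nworkers = 200\n"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("")

	// In falls back to the default when the value is not among the candidates;
	// RangeInt falls back when the value lies outside [min, max].
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info", "warn"})) // info
	fmt.Println(sec.Key("workers").RangeInt(8, 1, 64))                          // 8
}
```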
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Bools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
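The list accessors above split on a caller-supplied delimiter; for example:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("hosts = a.example.com, b.example.com\nports = 80,443,8080\n"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("")

	// Strings splits on the delimiter and trims whitespace around each item;
	// the typed variants (Ints, Float64s, Bools, ...) additionally convert each piece.
	fmt.Println(sec.Key("hosts").Strings(",")) // [a.example.com b.example.com]
	fmt.Println(sec.Key("ports").Ints(","))    // [80 443 8080]
}
```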
+func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidBools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
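The plain, Valid*, and Strict* list flavors differ only in how they treat unparseable items; a sketch:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("sizes = 1, two, 3\n"))
	if err != nil {
		log.Fatal(err)
	}
	key := cfg.Section("").Key("sizes")

	fmt.Println(key.Ints(","))      // [1 0 3]: invalid items become zero values
	fmt.Println(key.ValidInts(",")) // [1 3]:   invalid items are dropped
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict parse failed:", err) // fails on "two"
	}
}
```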
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. +func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. 
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go new file mode 100644 index 000000000..44fc526c2 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -0,0 +1,520 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
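Before parser.go begins below, one more usage sketch tying together the shadow-key support from key.go (ShadowLoad, ValueWithShadows); the key names and paths here are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[ssh]\nIdentityFile = ~/.ssh/id_ed25519\nIdentityFile = ~/.ssh/id_rsa\n")

	// ShadowLoad sets AllowShadows, so the repeated key is kept as a shadow
	// instead of being overwritten.
	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}

	key := cfg.Section("ssh").Key("IdentityFile")
	fmt.Println(key.String())           // first value only
	fmt.Println(key.ValueWithShadows()) // all values, in order
}
```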
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + var endIdx int + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
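readMultilines and readContinuationLines above are what make triple-quoted and backslash-continued values work; a rough sketch of both from the caller's perspective:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte(`
motd = """Welcome!
Second line."""
args = --verbose \
       --color=auto
`)
	cfg, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("")

	fmt.Printf("%q\n", sec.Key("motd").String()) // newline inside the """ ... """ value is preserved
	fmt.Printf("%q\n", sec.Key("args").String()) // the trailing backslash joins the next line
}
```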
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. 
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. + currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. 
+ p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go new file mode 100644 index 000000000..a3615d820 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. 
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) + } + } + return children +} diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go new file mode 100644 index 000000000..a486b2fe0 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. + // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. 
+func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. 
+ key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 055480b9e..000000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e11..000000000 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. 
-type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608c..000000000 --- 
a/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v3/LICENSE similarity index 51% rename from vendor/gopkg.in/yaml.v2/LICENSE.libyaml rename to vendor/gopkg.in/yaml.v3/LICENSE index 8da58fbf6..2683e4bb1 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -1,16 +1,17 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go -Copyright (c) 2006 Kirill Simonov +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in @@ -29,3 +30,21 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE similarity index 100% rename from vendor/gopkg.in/yaml.v2/NOTICE rename to vendor/gopkg.in/yaml.v3/NOTICE diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v3/README.md similarity index 66% rename from vendor/gopkg.in/yaml.v2/README.md rename to vendor/gopkg.in/yaml.v3/README.md index b50c6e877..08eb1babd 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -12,7 +12,23 @@ C library to parse and generate YAML data quickly and reliably. 
Compatibility ------------- -The yaml package supports most of YAML 1.1 and 1.2, including support for +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for anchors, tags, map merging, etc. Multi-document unmarshalling is not yet implemented, and base-60 floats from YAML 1.1 are purposefully not supported since they're a poor design and are gone in YAML 1.2. @@ -20,29 +36,30 @@ supported since they're a poor design and are gone in YAML 1.2. Installation and usage ---------------------- -The import path for the package is *gopkg.in/yaml.v2*. +The import path for the package is *gopkg.in/yaml.v3*. To install it, run: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 API documentation ----------------- If opened in a browser, the import path itself leads to the API documentation: - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) API stability ------------- -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). License ------- -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. Example @@ -55,7 +72,7 @@ import ( "fmt" "log" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) var data = ` diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v3/apic.go similarity index 93% rename from vendor/gopkg.in/yaml.v2/apic.go rename to vendor/gopkg.in/yaml.v3/apic.go index 1f7e87e67..ae7d049f1 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -86,6 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } @@ -138,7 +161,7 @@ func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { emitter.canonical = canonical } -//// Set the indentation increment. +// Set the indentation increment. func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { if indent < 2 || indent > 9 { indent = 2 @@ -288,29 +311,14 @@ func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { } } -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} // Create SCALAR. func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v3/decode.go similarity index 51% rename from vendor/gopkg.in/yaml.v2/decode.go rename to vendor/gopkg.in/yaml.v3/decode.go index 129bc2a97..0173b6982 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package yaml import ( @@ -11,34 +26,16 @@ import ( "time" ) -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. 
type parser struct { parser yaml_parser_t event yaml_event_t - doc *node + doc *Node + anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -66,6 +63,7 @@ func (p *parser) init() { if p.doneInit { return } + p.anchors = make(map[string]*Node) p.expect(yaml_STREAM_START_EVENT) p.doneInit = true } @@ -102,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -111,14 +112,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ -132,13 +137,14 @@ func (p *parser) fail() { failf("%s%s", where, msg) } -func (p *parser) anchor(n *node, anchor []byte) { +func (p *parser) anchor(n *Node, anchor []byte) { if anchor != nil { - p.doc.anchors[string(anchor)] = n + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n } } -func (p *parser) parse() *node { +func (p *parser) parse() *Node { p.init() switch p.peek() { case yaml_SCALAR_EVENT: @@ -154,67 +160,148 @@ func (p *parser) parse() *node { case yaml_STREAM_END_EVENT: // Happens when attempting to decode an empty buffer. return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") default: - panic("attempted to parse unknown event: " + p.event.typ.String()) + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) } } -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child } -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") p.doc = n p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } p.expect(yaml_DOCUMENT_END_EVENT) return n } -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) } p.expect(yaml_ALIAS_EVENT) return n } -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle p.anchor(n, p.event.anchor) p.expect(yaml_SCALAR_EVENT) return n } -func (p *parser) sequence() *node { - n := p.node(sequenceNode) +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } p.anchor(n, p.event.anchor) p.expect(yaml_SEQUENCE_START_EVENT) for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) + p.parseChild(n) } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) p.expect(yaml_SEQUENCE_END_EVENT) return n } -func (p *parser) mapping() *node { - n := p.node(mappingNode) +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } p.anchor(n, p.event.anchor) p.expect(yaml_MAPPING_START_EVENT) for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment 
for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" } p.expect(yaml_MAPPING_END_EVENT) return n @@ -224,48 +311,70 @@ func (p *parser) mapping() *node { // Decoder, unmarshals a node into a provided value. type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type + doc *Node + aliases map[*Node]bool terrors []string - strict bool + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( - mapItemType = reflect.TypeOf(MapItem{}) + nodeType = reflect.TypeOf(Node{}) durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() timeType = reflect.TypeOf(time.Time{}) ptrTimeType = reflect.TypeOf(&time.Time{}) ) -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) return d } -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + value := n.Value + if tag != seqTag && tag != mapTag { if len(value) > 10 { value = " `" + value[:7] + "...`" } else { value = " `" + value + "`" } } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true } -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { terrlen := len(d.terrors) err := u.UnmarshalYAML(func(v interface{}) (err error) { defer handleErr(&err) @@ -294,8 +403,8 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { // its types unmarshalled appropriately. // // If n holds a null value, prepare returns before doing anything. 
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { return out, false, false } again := true @@ -309,15 +418,40 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm again = true } if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { good = d.callUnmarshaler(n, u) return out, true, good } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } } } return out, false, false } +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + const ( // 400,000 decode operations is ~500kb of dense object declarations, or // ~5kb of dense object declarations with 10000% alias expansion @@ -347,7 +481,7 @@ func allowedAliasRatio(decodeCount int) float64 { } } -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { d.decodeCount++ if d.aliasDepth > 0 { d.aliasCount++ @@ -355,46 +489,55 @@ func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { failf("document contains excessive aliasing") } - switch n.kind { - case documentNode: + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: return d.document(n, out) - case aliasNode: + case AliasNode: return d.alias(n, out) } out, unmarshaled, good := d.prepare(n, out) if unmarshaled { return good } - switch n.kind { - case scalarNode: + switch n.Kind { + case ScalarNode: good = d.scalar(n, out) - case mappingNode: + case MappingNode: good = d.mapping(n, out) - case sequenceNode: + case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { d.doc = n - d.unmarshal(n.children[0], out) + d.unmarshal(n.Content[0], out) return true } return false } -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { if d.aliases[n] { // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) + failf("anchor '%s' value contains itself", n.Value) } d.aliases[n] = true d.aliasDepth++ - good = d.unmarshal(n.alias, out) + good = d.unmarshal(n.Alias, out) d.aliasDepth-- delete(d.aliases, n) return good @@ -408,15 +551,26 @@ func resetMap(out reflect.Value) { } } -func (d *decoder) scalar(n *node, out reflect.Value) bool { +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value + if n.indicatedString() { + tag = strTag + resolved = n.Value } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { data, err := base64.StdEncoding.DecodeString(resolved.(string)) if err != nil { failf("!!binary value contains invalid base64 data") @@ -425,12 +579,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { } } if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. @@ -443,13 +592,13 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) if ok { var text []byte - if tag == yaml_BINARY_TAG { + if tag == binaryTag { text = []byte(resolved.(string)) } else { // We let any value be unmarshaled into TextUnmarshaler. // That might be more lax than we'd like, but the // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.value) + text = []byte(n.Value) } err := u.UnmarshalText(text) if err != nil { @@ -460,47 +609,37 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { } switch out.Kind() { case reflect.String: - if tag == yaml_BINARY_TAG { + if tag == binaryTag { out.SetString(resolved.(string)) return true } - if resolved != nil { - out.SetString(n.value) - return true - } + out.SetString(n.Value) + return true case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. - out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } + out.Set(reflect.ValueOf(resolved)) return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + switch resolved := resolved.(type) { case int: - if !out.OverflowInt(int64(resolved)) { + if !isDuration && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } case int64: - if !out.OverflowInt(resolved) { + if !isDuration && !out.OverflowInt(resolved) { out.SetInt(resolved) return true } case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) return true } @@ -541,6 +680,17 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { case bool: out.SetBool(resolved) return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { @@ -563,13 +713,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { return true } case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } + panic("yaml internal error: please report the issue") } d.terror(n, tag, out) return false @@ -582,8 +726,8 @@ func settableValueOf(i interface{}) reflect.Value { return sv } -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) var iface reflect.Value switch out.Kind() { @@ -598,7 +742,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { iface = out out = settableValueOf(make([]interface{}, l)) default: - d.terror(n, yaml_SEQ_TAG, out) + d.terror(n, seqTag, out) return false } et := out.Type().Elem() @@ -606,7 +750,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j := 0 for i := 0; i < l; i++ { e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { + if ok := d.unmarshal(n.Content[i], e); ok { out.Index(j).Set(e) j++ } @@ -620,51 +764,79 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { return true } -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } switch out.Kind() { case reflect.Struct: return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) case reflect.Map: // okay case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - 
iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true + out = reflect.MakeMap(d.generalMapType) } + iface.Set(out) default: - d.terror(n, yaml_MAP_TAG, out) + d.terror(n, mapTag, out) return false } + outt := out.Type() kt := outt.Key() et := outt.Elem() - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) + mapIsNew = true } - l := len(n.children) for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -673,87 +845,83 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { failf("invalid map key: %#v", k.Interface()) } e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) } } } - d.mapType = mapType - return true -} -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) - return + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) } - out.SetMapIndex(k, v) + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true } -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { return false } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) + l := len(n.Content) for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false } } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType return true } -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { sinfo, err := getStructInfo(out.Type()) if err != nil { panic(err) } - name := settableValueOf("") - l := len(n.children) var inlineMap reflect.Value var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap 
= out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool - if d.strict { + if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) } + name := settableValueOf("") + l := len(n.Content) for i := 0; i < l; i += 2 { - ni := n.children[i] + ni := n.Content[i] if isMerge(ni) { - d.merge(n.children[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) continue } doneFields[info.Id] = true @@ -762,20 +930,25 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { if info.Inline == nil { field = out.Field(info.Num) } else { - field = out.FieldByIndex(info.Inline) + field = d.fieldByIndex(n, out, info.Inline) } - d.unmarshal(n.children[i+1], field) + d.unmarshal(n.Content[i+1], field) } else if sinfo.InlineMap != -1 { if inlineMap.IsNil() { inlineMap.Set(reflect.MakeMap(inlineMap.Type())) } value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -783,24 +956,34 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. 
- for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() } - } else if ni.kind != mappingNode { + } else if ni.Kind != MappingNode { failWantMap() } d.unmarshal(ni, out) @@ -808,8 +991,10 @@ func (d *decoder) merge(n *node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) } diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go similarity index 80% rename from vendor/gopkg.in/yaml.v2/emitterc.go rename to vendor/gopkg.in/yaml.v3/emitterc.go index a1c2cc526..0f47c9ca8 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -43,8 +65,13 @@ func put_break(emitter *yaml_emitter_t) bool { default: panic("unknown line break setting") } + if emitter.column == 0 { + emitter.space_above = true + } emitter.column = 0 emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true return true } @@ -97,8 +124,13 @@ func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { if !write(emitter, s, i) { return false } + if emitter.column == 0 { + emitter.space_above = true + } emitter.column = 0 emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true } return true } @@ -203,7 +235,14 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. 
+ emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } } return true } @@ -228,16 +267,22 @@ func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bo return yaml_emitter_emit_document_end(emitter, event) case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: return yaml_emitter_emit_flow_mapping_value(emitter, event, true) @@ -298,6 +343,8 @@ func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t emitter.column = 0 emitter.whitespace = true emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 if emitter.encoding != yaml_UTF8_ENCODING { if !yaml_emitter_write_bom(emitter) { @@ -392,13 +439,22 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { return false } - if emitter.canonical { + if emitter.canonical || true { if !yaml_emitter_write_indent(emitter) { return false } } } + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE return true } @@ -425,7 +481,20 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event // Expect the root node. func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect DOCUMENT-END. @@ -433,6 +502,12 @@ func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t if event.typ != yaml_DOCUMENT_END_EVENT { return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") } + // [Go] Force document foot separation. 
+ emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 if !yaml_emitter_write_indent(emitter) { return false } @@ -454,7 +529,7 @@ func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t } // Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { return false @@ -466,13 +541,15 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e } if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { + if emitter.canonical && !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { if !yaml_emitter_write_indent(emitter) { return false } @@ -480,29 +557,62 @@ func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_e if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { return false } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } - if !first { + if !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false } } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { if first { if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { return false @@ -514,13 +624,18 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve } if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } emitter.flow_level-- emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } if !yaml_emitter_write_indent(emitter) { return false } @@ -528,16 +643,33 @@ func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_eve if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { return false } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } emitter.state = emitter.states[len(emitter.states)-1] emitter.states = emitter.states[:len(emitter.states)-1] return true } - if !first { + if !first && !trail { if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { return false } } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { if !yaml_emitter_write_indent(emitter) { return false @@ -571,14 +703,32 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e return false } } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a block item node. 
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } } @@ -589,6 +739,9 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ emitter.states = emitter.states[:len(emitter.states)-1] return true } + if !yaml_emitter_process_head_comment(emitter) { + return false + } if !yaml_emitter_write_indent(emitter) { return false } @@ -596,7 +749,16 @@ func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_ return false } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true } // Expect a block key node. @@ -606,6 +768,9 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev return false } } + if !yaml_emitter_process_head_comment(emitter) { + return false + } if event.typ == yaml_MAPPING_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] emitter.indents = emitter.indents[:len(emitter.indents)-1] @@ -616,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -641,8 +813,42 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. 
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 } // Expect a node. @@ -908,6 +1114,71 @@ func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { panic("unknown scalar style") } +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + // Check if a %YAML directive is valid. 
func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { if version_directive.major != 1 || version_directive.minor != 1 { @@ -987,6 +1258,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { flow_indicators = false line_breaks = false special_characters = false + tab_characters = false leading_space = false leading_break = false @@ -1055,7 +1327,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { } } - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { special_characters = true } if is_space(value, i) { @@ -1110,10 +1384,12 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false } - if space_break || special_characters { + if space_break || tab_characters || special_characters { emitter.scalar_data.flow_plain_allowed = false emitter.scalar_data.block_plain_allowed = false emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { emitter.scalar_data.block_allowed = false } if line_breaks { @@ -1137,6 +1413,19 @@ func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bo emitter.tag_data.suffix = nil emitter.scalar_data.value = nil + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + switch event.typ { case yaml_ALIAS_EVENT: if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { @@ -1208,13 +1497,20 @@ func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { return false } } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } for emitter.column < indent { if !put(emitter, ' ') { return false } } emitter.whitespace = true - emitter.indention = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 return true } @@ -1311,7 +1607,7 @@ func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_ } func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { + if len(value) > 0 && !emitter.whitespace { if !put(emitter, ' ') { return false } @@ -1341,7 +1637,7 @@ func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allo if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1358,7 +1654,9 @@ func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allo } } - emitter.whitespace = false + if len(value) > 0 { + emitter.whitespace = false + } emitter.indention = false if emitter.root_context { emitter.open_ended = true @@ -1397,7 +1695,7 @@ func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []by if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1596,10 +1894,10 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if 
!yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } - emitter.indention = true + //emitter.indention = true emitter.whitespace = true breaks := true for i := 0; i < len(value); { @@ -1607,7 +1905,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1633,11 +1931,11 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } - emitter.indention = true + + //emitter.indention = true emitter.whitespace = true breaks := true @@ -1658,7 +1956,7 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !write_break(emitter, value, &i) { return false } - emitter.indention = true + //emitter.indention = true breaks = true } else { if breaks { @@ -1683,3 +1981,40 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo } return true } + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go similarity index 85% rename from vendor/gopkg.in/yaml.v2/parserc.go rename to vendor/gopkg.in/yaml.v3/parserc.go index 81d05dfe5..268558a0d 100644 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -45,11 +67,46 @@ import ( // Peek the next token in the token queue. func peek_token(parser *yaml_parser_t) *yaml_token_t { if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token } return nil } +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) 
+ } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + // Remove the next token from the queue (must be called after peek_token). func skip_token(parser *yaml_parser_t) { parser.token_available = false @@ -224,10 +281,32 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) parser.state = yaml_PARSE_BLOCK_NODE_STATE + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, start_mark: token.start_mark, end_mark: token.end_mark, + + head_comment: head_comment, } } else if token.typ != yaml_STREAM_END_TOKEN { @@ -284,6 +363,7 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event if token == nil { return false } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN || token.typ == yaml_DOCUMENT_START_TOKEN || @@ -327,9 +407,25 @@ func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) end_mark: end_mark, implicit: implicit, } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } return true } +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + // Parse the productions: // block_node_or_indentless_sequence ::= // ALIAS @@ -373,6 +469,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i end_mark: token.end_mark, anchor: token.value, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -486,6 +583,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i quoted_implicit: quoted_implicit, style: yaml_style_t(token.style), } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -502,6 +600,7 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), } + yaml_parser_set_event_comments(parser, event) return true } if token.typ == yaml_FLOW_MAPPING_START_TOKEN { @@ -516,6 +615,7 @@ func 
yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), } + yaml_parser_set_event_comments(parser, event) return true } if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { @@ -530,6 +630,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { @@ -544,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -579,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -590,7 +701,9 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -636,7 +749,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -662,6 +777,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) 
+ } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* @@ -675,6 +816,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -684,6 +828,19 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even return false } + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + if token.typ == yaml_KEY_TOKEN { mark := token.end_mark skip_token(parser) @@ -709,6 +866,7 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } @@ -770,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -820,6 +981,7 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true @@ -959,6 +1121,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event start_mark: token.start_mark, end_mark: token.end_mark, } + yaml_parser_set_event_comments(parser, event) skip_token(parser) return true } diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go similarity index 91% rename from vendor/gopkg.in/yaml.v2/readerc.go rename to vendor/gopkg.in/yaml.v3/readerc.go index 7c1f5fac3..b7de0a89c 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -95,7 +117,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { // [Go] This function was changed to guarantee the requested length size at EOF. // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests + // for that to be the case, and there are tests // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go similarity index 60% rename from vendor/gopkg.in/yaml.v2/resolve.go rename to vendor/gopkg.in/yaml.v3/resolve.go index 4120e0c91..64ae88805 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package yaml import ( @@ -34,18 +49,14 @@ func init() { tag string l []string }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, } m := resolveMap @@ -56,11 +67,37 @@ func init() { } } +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := 
longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + const longTagPrefix = "tag:yaml.org,2002:" func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } return "!!" + tag[len(longTagPrefix):] } return tag @@ -68,6 +105,9 @@ func shortTag(tag string) string { func longTag(tag string) string { if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } return longTagPrefix + tag[2:] } return tag @@ -75,7 +115,7 @@ func longTag(tag string) string { func resolvableTag(tag string) bool { switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: return true } return false @@ -84,23 +124,24 @@ func resolvableTag(tag string) bool { var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) if !resolvableTag(tag) { return tag, in } defer func() { switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + case "", rtag, strTag, binaryTag: return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { + case floatTag: + if rtag == intTag { switch v := out.(type) { case int64: - rtag = yaml_FLOAT_TAG + rtag = floatTag out = float64(v) return case int: - rtag = yaml_FLOAT_TAG + rtag = floatTag out = float64(v) return } @@ -115,7 +156,7 @@ func resolve(tag string, in string) (rtag string, out interface{}) { if in != "" { hint = resolveTable[in[0]] } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + if hint != 0 && tag != strTag && tag != binaryTag { // Handle things we can lookup in a map. if item, ok := resolveMap[in]; ok { return item.tag, item.value @@ -133,17 +174,17 @@ func resolve(tag string, in string) (rtag string, out interface{}) { // Not in the map, so maybe a normal float. floatv, err := strconv.ParseFloat(in, 64) if err == nil { - return yaml_FLOAT_TAG, floatv + return floatTag, floatv } case 'D', 'S': // Int, float, or timestamp. // Only try values as a timestamp if the value is unquoted or there's an explicit // !!timestamp tag. 
- if tag == "" || tag == yaml_TIMESTAMP_TAG { + if tag == "" || tag == timestampTag { t, ok := parseTimestamp(in) if ok { - return yaml_TIMESTAMP_TAG, t + return timestampTag, t } } @@ -151,49 +192,76 @@ func resolve(tag string, in string) (rtag string, out interface{}) { intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } uintv, err := strconv.ParseUint(plain, 0, 64) if err == nil { - return yaml_INT_TAG, uintv + return intTag, uintv } if yamlStyleFloat.MatchString(plain) { floatv, err := strconv.ParseFloat(plain, 64) if err == nil { - return yaml_FLOAT_TAG, floatv + return floatTag, floatv } } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) if err == nil { if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } uintv, err := strconv.ParseUint(plain[2:], 2, 64) if err == nil { - return yaml_INT_TAG, uintv + return intTag, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) if err == nil { if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) + return intTag, int(intv) } else { - return yaml_INT_TAG, intv + return intTag, intv } } } default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") } } - return yaml_STR_TAG, in + return strTag, in } // encodeBase64 encodes s as base64 that is broken up into multiple lines diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go similarity index 87% rename from vendor/gopkg.in/yaml.v2/scannerc.go rename to vendor/gopkg.in/yaml.v3/scannerc.go index 0b9bb6030..ca0070108 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or 
substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -489,6 +511,9 @@ func cache(parser *yaml_parser_t, length int) bool { // Advance the buffer pointer. func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } parser.mark.index++ parser.mark.column++ parser.unread-- @@ -502,17 +527,22 @@ func skip_line(parser *yaml_parser_t) { parser.mark.line++ parser.unread -= 2 parser.buffer_pos += 2 + parser.newlines++ } else if is_break(parser.buffer, parser.buffer_pos) { parser.mark.index++ parser.mark.column = 0 parser.mark.line++ parser.unread-- parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ } } // Copy a character to a string buffer and advance pointers. func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } w := width(parser.buffer[parser.buffer_pos]) if w == 0 { panic("invalid character sequence") @@ -564,6 +594,7 @@ func read_line(parser *yaml_parser_t, s []byte) []byte { parser.mark.column = 0 parser.mark.line++ parser.unread-- + parser.newlines++ return s } @@ -626,9 +657,13 @@ func trace(args ...interface{}) func() { func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] if !ok { break @@ -649,7 +684,7 @@ func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { } // The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { // Ensure that the buffer is initialized. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -660,13 +695,19 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return yaml_parser_fetch_stream_start(parser) } + scan_mark := parser.mark + // Eat whitespaces and comments until we reach the next token. if !yaml_parser_scan_to_next_token(parser) { return false } + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + // Check the indentation level against the current column. 
- if !yaml_parser_unroll_indent(parser, parser.mark.column) { + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { return false } @@ -699,6 +740,26 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) } + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + // Is it the flow sequence start indicator? if buf[pos] == '[' { return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) @@ -792,7 +853,7 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { // if it is followed by a non-space character. // // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. + // [Go] TODO Make this logic more reasonable. //switch parser.buffer[parser.buffer_pos] { //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': //} @@ -965,19 +1026,49 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // Pop indentation levels from the indents stack until the current level // becomes less or equal to the column. For each indentation level, append // the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { // In the flow context, do nothing. if parser.flow_level > 0 { return true } + block_mark := scan_mark + block_mark.index-- + // Loop through the indentation levels in the stack. for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + // Create a token and append it to the queue. 
token := yaml_token_t{ typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, + start_mark: block_mark, + end_mark: block_mark, } yaml_insert_token(parser, -1, &token) @@ -1026,7 +1117,7 @@ func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { } // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1050,7 +1141,7 @@ func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { // Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1074,7 +1165,7 @@ func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { // Produce the DOCUMENT-START or DOCUMENT-END token. func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { return false } @@ -1107,6 +1198,7 @@ func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_ // Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. if !yaml_parser_save_simple_key(parser) { return false @@ -1442,6 +1534,8 @@ func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { // Eat whitespaces and comments until the next token is found. func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + scan_mark := parser.mark + // Until the next token is not found. for { // Allow the BOM mark to start a line. @@ -1468,13 +1562,33 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { } } + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + // Eat a comment until a line break. if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } + if !yaml_parser_scan_comments(parser, scan_mark) { + return false } } @@ -1572,6 +1686,10 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool } if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. 
+ //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1987,7 +2105,7 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', // '%'. - // [Go] Convert this into more reasonable logic. + // [Go] TODO Convert this into more reasonable logic. for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || @@ -2142,6 +2260,9 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l } } if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -2709,3 +2830,209 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b } return true } + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. 
+ var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. + token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
+ seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go similarity index 76% rename from vendor/gopkg.in/yaml.v2/sorter.go rename to vendor/gopkg.in/yaml.v3/sorter.go index 4c45e660a..9210ece7e 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package yaml import ( @@ -37,8 +52,10 @@ func (l keyList) Less(i, j int) bool { return ak < bk } ar, br := []rune(a.String()), []rune(b.String()) + digits := false for i := 0; i < len(ar) && i < len(br); i++ { if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) continue } al := unicode.IsLetter(ar[i]) @@ -47,12 +64,16 @@ func (l keyList) Less(i, j int) bool { return ar[i] < br[i] } if al || bl { - return bl + if digits { + return al + } else { + return bl + } } var ai, bi int var an, bn int64 if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { if ar[j] != '0' { an = 1 bn = 1 diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 000000000..b8a116bf9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go similarity index 55% rename from vendor/gopkg.in/yaml.v2/yaml.go rename to vendor/gopkg.in/yaml.v3/yaml.go index 89650e293..8cec6da48 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -1,3 +1,18 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package yaml implements YAML support for the Go language. // // Source code and other details for the project are available at GitHub: @@ -13,23 +28,16 @@ import ( "reflect" "strings" "sync" + "unicode/utf8" ) -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - // The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. +// behavior when being unmarshaled from a YAML document. type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { UnmarshalYAML(unmarshal func(interface{}) error) error } @@ -81,18 +89,10 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. 
-func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - // A Decoder reads and decodes YAML values from an input stream. type Decoder struct { - strict bool - parser *parser + parser *parser + knownFields bool } // NewDecoder returns a new decoder that reads from r. @@ -105,10 +105,10 @@ func NewDecoder(r io.Reader) *Decoder { } } -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict +// KnownFields ensures that the keys in decoded mappings +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable } // Decode reads the next YAML-encoded value from its input @@ -117,7 +117,8 @@ func (dec *Decoder) SetStrict(strict bool) { // See the documentation for Unmarshal for details about the // conversion of YAML into a Go value. func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) + d := newDecoder() + d.knownFields = dec.knownFields defer handleErr(&err) node := dec.parser.parse() if node == nil { @@ -134,9 +135,27 @@ func (dec *Decoder) Decode(v interface{}) (err error) { return nil } +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + func unmarshal(in []byte, out interface{}, strict bool) (err error) { defer handleErr(&err) - d := newDecoder(strict) + d := newDecoder() p := newParser(in) defer p.destroy() node := p.parse() @@ -175,7 +194,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -233,6 +252,32 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) { @@ -275,6 +320,168 @@ func (e *TypeError) Error() string { return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) } +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, columns, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data pleasantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the appearance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted representation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + // -------------------------------------------------------------------------- // Maintain a mapping of keys to structure field indexes @@ -289,6 +496,10 @@ type structInfo struct { // InlineMap is the number of the field in the struct that // contains an ,inline map, or -1 if there's none. InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. 
+ InlineUnmarshalers [][]int } type fieldInfo struct { @@ -306,6 +517,12 @@ type fieldInfo struct { var structMap = make(map[reflect.Type]*structInfo) var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} func getStructInfo(st reflect.Type) (*structInfo, error) { fieldMapMutex.RLock() @@ -319,6 +536,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { fieldsMap := make(map[string]fieldInfo) fieldsList := make([]fieldInfo, 0, n) inlineMap := -1 + inlineUnmarshalers := [][]int(nil) for i := 0; i != n; i++ { field := st.Field(i) if field.PkgPath != "" && !field.Anonymous { @@ -347,7 +565,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { case "inline": inline = true default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) } } tag = fields[0] @@ -357,34 +575,47 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { switch field.Type.Kind() { case reflect.Map: if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + return nil, errors.New("multiple ,inline maps in struct " + st.String()) } if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) } inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) } default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") + return nil, errors.New("option ,inline may only be used on a struct or map field") } continue } @@ -396,7 +627,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + msg := "duplicated key '" + info.Key + "' in struct " + st.String() return nil, errors.New(msg) } @@ -406,9 +637,10 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, } fieldMapMutex.Lock() diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go similarity index 88% rename from vendor/gopkg.in/yaml.v2/yamlh.go rename to vendor/gopkg.in/yaml.v3/yamlh.go index f6a9c8e34..7c6d00770 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml import ( @@ -73,13 +95,13 @@ type yaml_scalar_style_t yaml_style_t // Scalar styles. const ( // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. 
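The getStructInfo changes above extend the ,inline option beyond string-keyed maps to struct and pointer-to-struct fields, and record inlined fields whose types implement Unmarshaler. A short sketch of what those tags look like from the caller's side, with invented type and key names:

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

// Metadata is promoted into the parent mapping via ",inline"; a pointer to a
// struct is dereferenced and handled the same way.
type Metadata struct {
	Version string `yaml:"version"`
}

type Config struct {
	Name string `yaml:"name"`
	// Inlined struct: its fields appear at the top level of the mapping.
	Metadata `yaml:",inline"`
	// A single ",inline" map with string keys collects any remaining keys;
	// declaring a second one trips the "multiple ,inline maps" error above.
	Extra map[string]string `yaml:",inline"`
}

func main() {
	var c Config
	err := yaml.Unmarshal([]byte("name: autograph\nversion: \"3.9.1\"\nregion: us-west-2\n"), &c)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Version, c.Extra["region"])
	// autograph 3.9.1 us-west-2
}
```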
) type yaml_sequence_style_t yaml_style_t @@ -238,6 +260,7 @@ const ( yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. yaml_MAPPING_START_EVENT // A MAPPING-START event. yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT ) var eventStrings = []string{ @@ -252,6 +275,7 @@ var eventStrings = []string{ yaml_SEQUENCE_END_EVENT: "sequence end", yaml_MAPPING_START_EVENT: "mapping start", yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", } func (e yaml_event_type_t) String() string { @@ -279,6 +303,12 @@ type yaml_event_t struct { // The list of tag directives (for yaml_DOCUMENT_START_EVENT). tag_directives []yaml_tag_directive_t + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). anchor []byte @@ -554,6 +584,8 @@ type yaml_parser_t struct { unread int // The number of unread characters in the buffer. + newlines int // The number of line breaks since last non-break/non-blank character + raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. @@ -562,6 +594,17 @@ type yaml_parser_t struct { offset int // The offset of the current position (in bytes). mark yaml_mark_t // The mark of the current position. + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + // Scanner stuff stream_start_produced bool // Have we started to scan the input stream? @@ -595,6 +638,18 @@ type yaml_parser_t struct { document *yaml_document_t // The currently parsed document. } +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + // Emitter Definitions // The prototype of a write handler. @@ -625,8 +680,10 @@ const ( yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. @@ -698,6 +755,9 @@ type yaml_emitter_t struct { indention bool // If the last character was an indentation character (' ', '-', '?', ':')? open_ended bool // If an explicit document end is required? 
+ space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + // Anchor analysis. anchor_data struct { anchor []byte // The anchor value. @@ -721,6 +781,14 @@ type yaml_emitter_t struct { style yaml_scalar_style_t // The output style. } + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go similarity index 78% rename from vendor/gopkg.in/yaml.v2/yamlprivateh.go rename to vendor/gopkg.in/yaml.v3/yamlprivateh.go index 8110ce3c3..e88f9c54a 100644 --- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -1,3 +1,25 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + package yaml const ( @@ -114,8 +136,9 @@ func is_crlf(b []byte, i int) bool { // Check if the character is a line break or NUL. func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -127,8 +150,9 @@ func is_breakz(b []byte, i int) bool { // Check if the character is a line break, space, or NUL. func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || + return ( + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -141,8 +165,9 @@ func is_spacez(b []byte, i int) bool { // Check if the character is a line break, space, tab, or NUL. 
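The head_comment, line_comment, and foot_comment fields added to the event, parser, and emitter structs above, together with the bit-flag scalar styles, are what carry Node comments and styles through a round trip. A small sketch, with invented key names, of building nodes by hand and letting the emitter write comments and style back out; the expected output shape is an assumption based on the comment handling added here:

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	// Comments set on nodes are emitted next to the values they annotate;
	// Style forces the single-quoted scalar form for the value.
	key := &yaml.Node{
		Kind:        yaml.ScalarNode,
		Value:       "signer",
		HeadComment: "default content signature key",
	}
	val := &yaml.Node{
		Kind:        yaml.ScalarNode,
		Value:       "webextensions-rsa",
		Style:       yaml.SingleQuotedStyle,
		LineComment: "rotated 2024-09",
	}
	root := &yaml.Node{
		Kind:    yaml.MappingNode,
		Content: []*yaml.Node{key, val},
	}

	out, err := yaml.Marshal(root)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
	// Expected shape:
	//   # default content signature key
	//   signer: 'webextensions-rsa' # rotated 2024-09
}
```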
func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/vendor/modules.txt b/vendor/modules.txt index adec96f25..76b3c6168 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,91 +1,160 @@ -# cloud.google.com/go v0.43.0 -## explicit; go 1.9 +# cloud.google.com/go v0.115.1 +## explicit; go 1.20 +cloud.google.com/go +cloud.google.com/go/internal +cloud.google.com/go/internal/optional +cloud.google.com/go/internal/trace +cloud.google.com/go/internal/version +# cloud.google.com/go/auth v0.9.7 +## explicit; go 1.21 +cloud.google.com/go/auth +cloud.google.com/go/auth/credentials +cloud.google.com/go/auth/credentials/internal/externalaccount +cloud.google.com/go/auth/credentials/internal/externalaccountuser +cloud.google.com/go/auth/credentials/internal/gdch +cloud.google.com/go/auth/credentials/internal/impersonate +cloud.google.com/go/auth/credentials/internal/stsexchange +cloud.google.com/go/auth/grpctransport +cloud.google.com/go/auth/httptransport +cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute +cloud.google.com/go/auth/internal/credsfile +cloud.google.com/go/auth/internal/jwt +cloud.google.com/go/auth/internal/transport +cloud.google.com/go/auth/internal/transport/cert +# cloud.google.com/go/auth/oauth2adapt v0.2.4 +## explicit; go 1.20 +cloud.google.com/go/auth/oauth2adapt +# cloud.google.com/go/compute/metadata v0.5.2 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# github.com/Azure/azure-sdk-for-go v32.6.0+incompatible -## explicit -github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault -github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/go-autorest/autorest v0.9.2 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.8.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/azure/auth -# github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/azure/cli -# github.com/Azure/go-autorest/autorest/date v0.2.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/date -# github.com/Azure/go-autorest/autorest/to v0.3.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/to -# github.com/Azure/go-autorest/autorest/validation v0.2.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/validation -# github.com/Azure/go-autorest/logger v0.1.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/logger -# github.com/Azure/go-autorest/tracing v0.5.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/tracing +# cloud.google.com/go/iam v1.2.1 +## explicit; go 1.21 +cloud.google.com/go/iam +cloud.google.com/go/iam/apiv1/iampb +# cloud.google.com/go/kms v1.20.0 +## explicit; go 1.21 +cloud.google.com/go/kms/apiv1 +cloud.google.com/go/kms/apiv1/kmspb +cloud.google.com/go/kms/internal +# cloud.google.com/go/longrunning v0.6.1 +## explicit; go 1.21 +cloud.google.com/go/longrunning +cloud.google.com/go/longrunning/autogen +cloud.google.com/go/longrunning/autogen/longrunningpb +# cloud.google.com/go/storage v1.43.0 +## explicit; go 1.20 +cloud.google.com/go/storage 
+cloud.google.com/go/storage/internal +cloud.google.com/go/storage/internal/apiv2 +cloud.google.com/go/storage/internal/apiv2/storagepb +# filippo.io/age v1.2.0 +## explicit; go 1.19 +filippo.io/age +filippo.io/age/armor +filippo.io/age/internal/bech32 +filippo.io/age/internal/format +filippo.io/age/internal/stream +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/azcore/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming +github.com/Azure/azure-sdk-for-go/sdk/azcore/to +github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azidentity +github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/internal/diag +github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo +github.com/Azure/azure-sdk-for-go/sdk/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/internal/log +github.com/Azure/azure-sdk-for-go/sdk/internal/poller +github.com/Azure/azure-sdk-for-go/sdk/internal/temporal +github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 +## explicit; go 1.18 +github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache +github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential +github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops 
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared +github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version +github.com/AzureAD/microsoft-authentication-library-for-go/apps/public # github.com/DataDog/datadog-go v3.7.2+incompatible ## explicit github.com/DataDog/datadog-go/statsd +# github.com/ProtonMail/go-crypto v1.1.0-beta.0-proton +## explicit; go 1.17 +github.com/ProtonMail/go-crypto/bitcurves +github.com/ProtonMail/go-crypto/brainpool +github.com/ProtonMail/go-crypto/eax +github.com/ProtonMail/go-crypto/internal/byteutil +github.com/ProtonMail/go-crypto/ocb +github.com/ProtonMail/go-crypto/openpgp +github.com/ProtonMail/go-crypto/openpgp/aes/keywrap +github.com/ProtonMail/go-crypto/openpgp/armor +github.com/ProtonMail/go-crypto/openpgp/ecdh +github.com/ProtonMail/go-crypto/openpgp/ecdsa +github.com/ProtonMail/go-crypto/openpgp/ed25519 +github.com/ProtonMail/go-crypto/openpgp/ed448 +github.com/ProtonMail/go-crypto/openpgp/eddsa +github.com/ProtonMail/go-crypto/openpgp/elgamal +github.com/ProtonMail/go-crypto/openpgp/errors +github.com/ProtonMail/go-crypto/openpgp/internal/algorithm +github.com/ProtonMail/go-crypto/openpgp/internal/ecc +github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519 +github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field +github.com/ProtonMail/go-crypto/openpgp/internal/encoding +github.com/ProtonMail/go-crypto/openpgp/packet +github.com/ProtonMail/go-crypto/openpgp/s2k +github.com/ProtonMail/go-crypto/openpgp/symmetric +github.com/ProtonMail/go-crypto/openpgp/x25519 +github.com/ProtonMail/go-crypto/openpgp/x448 # github.com/aws/aws-lambda-go v1.47.0 ## explicit; go 1.18 github.com/aws/aws-lambda-go/lambda github.com/aws/aws-lambda-go/lambda/handlertrace github.com/aws/aws-lambda-go/lambda/messages github.com/aws/aws-lambda-go/lambdacontext -# github.com/aws/aws-sdk-go v1.42.15 -## explicit; go 1.11 -github.com/aws/aws-sdk-go/aws -github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/awsutil -github.com/aws/aws-sdk-go/aws/client -github.com/aws/aws-sdk-go/aws/client/metadata -github.com/aws/aws-sdk-go/aws/corehandlers -github.com/aws/aws-sdk-go/aws/credentials -github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds -github.com/aws/aws-sdk-go/aws/credentials/endpointcreds -github.com/aws/aws-sdk-go/aws/credentials/processcreds -github.com/aws/aws-sdk-go/aws/credentials/ssocreds -github.com/aws/aws-sdk-go/aws/credentials/stscreds -github.com/aws/aws-sdk-go/aws/csm -github.com/aws/aws-sdk-go/aws/defaults -github.com/aws/aws-sdk-go/aws/ec2metadata -github.com/aws/aws-sdk-go/aws/endpoints -github.com/aws/aws-sdk-go/aws/request -github.com/aws/aws-sdk-go/aws/session -github.com/aws/aws-sdk-go/aws/signer/v4 -github.com/aws/aws-sdk-go/internal/context -github.com/aws/aws-sdk-go/internal/ini -github.com/aws/aws-sdk-go/internal/sdkio 
-github.com/aws/aws-sdk-go/internal/sdkmath -github.com/aws/aws-sdk-go/internal/sdkrand -github.com/aws/aws-sdk-go/internal/sdkuri -github.com/aws/aws-sdk-go/internal/shareddefaults -github.com/aws/aws-sdk-go/internal/strings -github.com/aws/aws-sdk-go/internal/sync/singleflight -github.com/aws/aws-sdk-go/private/protocol -github.com/aws/aws-sdk-go/private/protocol/json/jsonutil -github.com/aws/aws-sdk-go/private/protocol/jsonrpc -github.com/aws/aws-sdk-go/private/protocol/query -github.com/aws/aws-sdk-go/private/protocol/query/queryutil -github.com/aws/aws-sdk-go/private/protocol/rest -github.com/aws/aws-sdk-go/private/protocol/restjson -github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/service/kms -github.com/aws/aws-sdk-go/service/kms/kmsiface -github.com/aws/aws-sdk-go/service/sso -github.com/aws/aws-sdk-go/service/sso/ssoiface -github.com/aws/aws-sdk-go/service/sts -github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/aws/aws-sdk-go-v2 v1.32.3 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws @@ -165,6 +234,11 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config +# github.com/aws/aws-sdk-go-v2/service/kms v1.36.3 +## explicit; go 1.21 +github.com/aws/aws-sdk-go-v2/service/kms +github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/kms/types # github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/s3 @@ -216,63 +290,202 @@ github.com/aws/smithy-go/tracing github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io github.com/aws/smithy-go/waiter -# github.com/dgrijalva/jwt-go v3.2.0+incompatible -## explicit -github.com/dgrijalva/jwt-go -# github.com/dimchansky/utfbom v1.1.0 -## explicit -github.com/dimchansky/utfbom -# github.com/fatih/color v1.7.0 +# github.com/blang/semver v3.5.1+incompatible ## explicit +github.com/blang/semver +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 +# github.com/cloudflare/circl v1.4.0 +## explicit; go 1.21 +github.com/cloudflare/circl/dh/x25519 +github.com/cloudflare/circl/dh/x448 +github.com/cloudflare/circl/ecc/goldilocks +github.com/cloudflare/circl/internal/conv +github.com/cloudflare/circl/internal/sha3 +github.com/cloudflare/circl/math +github.com/cloudflare/circl/math/fp25519 +github.com/cloudflare/circl/math/fp448 +github.com/cloudflare/circl/math/mlsbset +github.com/cloudflare/circl/sign +github.com/cloudflare/circl/sign/ed25519 +github.com/cloudflare/circl/sign/ed448 +# github.com/cpuguy83/go-md2man/v2 v2.0.5 +## explicit; go 1.11 +github.com/cpuguy83/go-md2man/v2/md2man +# github.com/fatih/color v1.17.0 +## explicit; go 1.17 github.com/fatih/color -# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b +# github.com/felixge/httpsnoop v1.0.4 +## explicit; go 1.13 +github.com/felixge/httpsnoop +# github.com/getsops/gopgagent v0.0.0-20240527072608-0c14999532fe ## explicit +github.com/getsops/gopgagent +# github.com/getsops/sops/v3 v3.9.1 +## explicit; go 1.22 +github.com/getsops/sops/v3 +github.com/getsops/sops/v3/aes +github.com/getsops/sops/v3/age +github.com/getsops/sops/v3/audit +github.com/getsops/sops/v3/azkv +github.com/getsops/sops/v3/cmd/sops/codes +github.com/getsops/sops/v3/cmd/sops/common +github.com/getsops/sops/v3/cmd/sops/formats 
+github.com/getsops/sops/v3/config +github.com/getsops/sops/v3/decrypt +github.com/getsops/sops/v3/gcpkms +github.com/getsops/sops/v3/hcvault +github.com/getsops/sops/v3/keys +github.com/getsops/sops/v3/keyservice +github.com/getsops/sops/v3/kms +github.com/getsops/sops/v3/logging +github.com/getsops/sops/v3/pgp +github.com/getsops/sops/v3/publish +github.com/getsops/sops/v3/shamir +github.com/getsops/sops/v3/stores +github.com/getsops/sops/v3/stores/dotenv +github.com/getsops/sops/v3/stores/ini +github.com/getsops/sops/v3/stores/json +github.com/getsops/sops/v3/stores/yaml +github.com/getsops/sops/v3/version +# github.com/go-jose/go-jose/v4 v4.0.4 +## explicit; go 1.21 +github.com/go-jose/go-jose/v4 +github.com/go-jose/go-jose/v4/cipher +github.com/go-jose/go-jose/v4/json +github.com/go-jose/go-jose/v4/jwt +# github.com/go-logr/logr v1.4.2 +## explicit; go 1.18 +github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr +# github.com/golang-jwt/jwt/v5 v5.2.1 +## explicit; go 1.18 +github.com/golang-jwt/jwt/v5 +# github.com/golang/glog v1.2.2 +## explicit; go 1.19 github.com/golang/glog -# github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 +github.com/golang/glog/internal/logsink +github.com/golang/glog/internal/stackdump +# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru # github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.3.2 -## explicit +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp +# github.com/google/go-cmp v0.6.0 +## explicit; go 1.13 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value +# github.com/google/s2a-go v0.1.8 +## explicit; go 1.20 +github.com/google/s2a-go +github.com/google/s2a-go/fallback +github.com/google/s2a-go/internal/authinfo +github.com/google/s2a-go/internal/handshaker +github.com/google/s2a-go/internal/handshaker/service +github.com/google/s2a-go/internal/proto/common_go_proto +github.com/google/s2a-go/internal/proto/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/s2a_go_proto +github.com/google/s2a-go/internal/proto/v2/common_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto +github.com/google/s2a-go/internal/record +github.com/google/s2a-go/internal/record/internal/aeadcrypter +github.com/google/s2a-go/internal/record/internal/halfconn +github.com/google/s2a-go/internal/tokenmanager +github.com/google/s2a-go/internal/v2 +github.com/google/s2a-go/internal/v2/certverifier +github.com/google/s2a-go/internal/v2/remotesigner +github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/retry +github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/gax-go/v2 v2.0.5 -## explicit +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 +## explicit; go 1.19 +github.com/googleapis/enterprise-certificate-proxy/client +github.com/googleapis/enterprise-certificate-proxy/client/util +# github.com/googleapis/gax-go/v2 
v2.13.0 +## explicit; go 1.20 github.com/googleapis/gax-go/v2 +github.com/googleapis/gax-go/v2/apierror +github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx +github.com/googleapis/gax-go/v2/internal +github.com/googleapis/gax-go/v2/iterator # github.com/gorilla/mux v1.8.0 ## explicit; go 1.12 github.com/gorilla/mux # github.com/goware/prefixer v0.0.0-20160118172347-395022866408 ## explicit github.com/goware/prefixer +# github.com/hashicorp/errwrap v1.1.0 +## explicit +github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 +github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 +github.com/hashicorp/go-multierror +# github.com/hashicorp/go-retryablehttp v0.7.7 +## explicit; go 1.19 +github.com/hashicorp/go-retryablehttp +# github.com/hashicorp/go-rootcerts v1.0.2 +## explicit; go 1.12 +github.com/hashicorp/go-rootcerts +# github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 +## explicit; go 1.20 +github.com/hashicorp/go-secure-stdlib/parseutil +# github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 +## explicit; go 1.16 +github.com/hashicorp/go-secure-stdlib/strutil +# github.com/hashicorp/go-sockaddr v1.0.7 +## explicit; go 1.19 +github.com/hashicorp/go-sockaddr # github.com/hashicorp/golang-lru v0.5.4 ## explicit; go 1.12 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c +# github.com/hashicorp/hcl v1.0.0 ## explicit -github.com/howeyc/gopass -# github.com/jmespath/go-jmespath v0.4.0 -## explicit; go 1.14 -github.com/jmespath/go-jmespath +github.com/hashicorp/hcl +github.com/hashicorp/hcl/hcl/ast +github.com/hashicorp/hcl/hcl/parser +github.com/hashicorp/hcl/hcl/scanner +github.com/hashicorp/hcl/hcl/strconv +github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/json/scanner +github.com/hashicorp/hcl/json/token +# github.com/hashicorp/vault/api v1.15.0 +## explicit; go 1.21 +github.com/hashicorp/vault/api +# github.com/kylelemons/godebug v1.1.0 +## explicit; go 1.11 +github.com/kylelemons/godebug/diff +github.com/kylelemons/godebug/pretty # github.com/lib/pq v1.10.9 ## explicit; go 1.13 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram -# github.com/mattn/go-colorable v0.1.4 -## explicit +# github.com/mattn/go-colorable v0.1.13 +## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.10 -## explicit; go 1.14 +# github.com/mattn/go-isatty v0.0.20 +## explicit; go 1.15 github.com/mattn/go-isatty # github.com/miekg/pkcs11 v1.0.3 ## explicit; go 1.12 @@ -280,21 +493,36 @@ github.com/miekg/pkcs11 # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/go-wordwrap v1.0.0 -## explicit +# github.com/mitchellh/go-wordwrap v1.0.1 +## explicit; go 1.14 github.com/mitchellh/go-wordwrap +# github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure # github.com/mozilla-services/yaml v0.0.0-20191106225358-5c216288813c ## explicit github.com/mozilla-services/yaml +# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c +## explicit; go 1.14 +github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 +# github.com/ryanuber/go-glob v1.0.0 +## explicit +github.com/ryanuber/go-glob # 
github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/ugorji/go v0.0.0-20180112141927-9831f2c3ac10 ## explicit github.com/ugorji/go/codec +# github.com/urfave/cli v1.22.15 +## explicit; go 1.11 +github.com/urfave/cli # github.com/youtube/vitess v2.1.1+incompatible ## explicit github.com/youtube/vitess/go/acl @@ -304,9 +532,6 @@ github.com/youtube/vitess/go/sync2 # go.mozilla.org/cose v0.0.0-20200221144611-2ea72a6b3de3 ## explicit; go 1.12 go.mozilla.org/cose -# go.mozilla.org/gopgagent v0.0.0-20170926210634-4d7ea76ff71a -## explicit -go.mozilla.org/gopgagent # go.mozilla.org/hawk v0.0.0-20190327210923-a483e4a7047e ## explicit go.mozilla.org/hawk @@ -319,31 +544,14 @@ go.mozilla.org/mozlogrus # go.mozilla.org/pkcs7 v0.9.0 ## explicit; go 1.11 go.mozilla.org/pkcs7 -# go.mozilla.org/sops v0.0.0-20190912205235-14a22d7a7060 -## explicit; go 1.12 -go.mozilla.org/sops -go.mozilla.org/sops/aes -go.mozilla.org/sops/audit -go.mozilla.org/sops/azkv -go.mozilla.org/sops/decrypt -go.mozilla.org/sops/gcpkms -go.mozilla.org/sops/keys -go.mozilla.org/sops/keyservice -go.mozilla.org/sops/kms -go.mozilla.org/sops/logging -go.mozilla.org/sops/pgp -go.mozilla.org/sops/shamir -go.mozilla.org/sops/stores -go.mozilla.org/sops/stores/dotenv -go.mozilla.org/sops/stores/json -go.mozilla.org/sops/stores/yaml -# go.opencensus.io v0.22.1 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal go.opencensus.io/internal/tagencoding go.opencensus.io/metric/metricdata go.opencensus.io/metric/metricproducer +go.opencensus.io/plugin/ocgrpc go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/b3 go.opencensus.io/resource @@ -355,113 +563,251 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 +## explicit; go 1.22 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 +## explicit; go 1.22 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil +# go.opentelemetry.io/otel v1.30.0 +## explicit; go 1.22 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.20.0 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/metric v1.30.0 +## explicit; go 1.22 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/trace v1.30.0 +## explicit; go 1.22 +go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded # go.uber.org/mock v0.5.0 ## explicit; go 1.22 go.uber.org/mock/gomock -# golang.org/x/crypto v0.21.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.27.0 +## explicit; go 1.20 
+golang.org/x/crypto/argon2 +golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k +golang.org/x/crypto/chacha20 +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/curve25519 +golang.org/x/crypto/hkdf +golang.org/x/crypto/internal/alias +golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.23.0 +golang.org/x/crypto/poly1305 +golang.org/x/crypto/scrypt +golang.org/x/crypto/sha3 +# golang.org/x/net v0.29.0 ## explicit; go 1.18 golang.org/x/net/context -golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 -## explicit; go 1.11 +# golang.org/x/oauth2 v0.23.0 +## explicit; go 1.18 golang.org/x/oauth2 +golang.org/x/oauth2/authhandler golang.org/x/oauth2/google +golang.org/x/oauth2/google/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/impersonate +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sys v0.24.0 +# golang.org/x/sync v0.8.0 +## explicit; go 1.18 +golang.org/x/sync/semaphore +# golang.org/x/sys v0.25.0 ## explicit; go 1.18 +golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.18.0 +golang.org/x/sys/windows/registry +# golang.org/x/term v0.24.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.18.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/api v0.11.0 -## explicit; go 1.11 -google.golang.org/api/cloudkms/v1 +# golang.org/x/time v0.6.0 +## explicit; go 1.18 +golang.org/x/time/rate +# google.golang.org/api v0.199.0 +## explicit; go 1.21 google.golang.org/api/googleapi -google.golang.org/api/googleapi/internal/uritemplates google.golang.org/api/googleapi/transport +google.golang.org/api/iamcredentials/v1 google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/gensupport +google.golang.org/api/internal/impersonate +google.golang.org/api/internal/third_party/uritemplates +google.golang.org/api/iterator google.golang.org/api/option +google.golang.org/api/option/internaloption +google.golang.org/api/storage/v1 +google.golang.org/api/transport +google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.1 -## explicit -google.golang.org/appengine -google.golang.org/appengine/internal -google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/base -google.golang.org/appengine/internal/datastore -google.golang.org/appengine/internal/log -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/urlfetch -# 
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 -## explicit +# google.golang.org/genproto v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 +google.golang.org/genproto/googleapis/cloud/location +google.golang.org/genproto/googleapis/type/date +google.golang.org/genproto/googleapis/type/expr +# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 +google.golang.org/genproto/googleapis/api +google.golang.org/genproto/googleapis/api/annotations +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240930140551-af27646dc61f +## explicit; go 1.21 +google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.24.0 -## explicit; go 1.11 +# google.golang.org/grpc v1.67.1 +## explicit; go 1.21 google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb +google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 +google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/internal +google.golang.org/grpc/credentials/alts +google.golang.org/grpc/credentials/alts/internal +google.golang.org/grpc/credentials/alts/internal/authinfo +google.golang.org/grpc/credentials/alts/internal/conn +google.golang.org/grpc/credentials/alts/internal/handshaker +google.golang.org/grpc/credentials/alts/internal/handshaker/service +google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp +google.golang.org/grpc/credentials/google +google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/googlecloud +google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix +google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats +google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/internal/xds google.golang.org/grpc/keepalive +google.golang.org/grpc/mem 
google.golang.org/grpc/metadata -google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns -google.golang.org/grpc/resolver/passthrough +google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# gopkg.in/yaml.v2 v2.2.8 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 +google.golang.org/protobuf/encoding/protojson +google.golang.org/protobuf/encoding/prototext +google.golang.org/protobuf/encoding/protowire +google.golang.org/protobuf/internal/descfmt +google.golang.org/protobuf/internal/descopts +google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport +google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json +google.golang.org/protobuf/internal/encoding/messageset +google.golang.org/protobuf/internal/encoding/tag +google.golang.org/protobuf/internal/encoding/text +google.golang.org/protobuf/internal/errors +google.golang.org/protobuf/internal/filedesc +google.golang.org/protobuf/internal/filetype +google.golang.org/protobuf/internal/flags +google.golang.org/protobuf/internal/genid +google.golang.org/protobuf/internal/impl +google.golang.org/protobuf/internal/order +google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/set +google.golang.org/protobuf/internal/strs +google.golang.org/protobuf/internal/version +google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt +google.golang.org/protobuf/reflect/protodesc +google.golang.org/protobuf/reflect/protoreflect +google.golang.org/protobuf/reflect/protoregistry +google.golang.org/protobuf/runtime/protoiface +google.golang.org/protobuf/runtime/protoimpl +google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/gofeaturespb +google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/emptypb +google.golang.org/protobuf/types/known/fieldmaskpb +google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/known/wrapperspb +# gopkg.in/ini.v1 v1.67.0 +## explicit +gopkg.in/ini.v1 +# gopkg.in/yaml.v3 v3.0.1 ## explicit -gopkg.in/yaml.v2 +gopkg.in/yaml.v3
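With modules.txt now pinning github.com/getsops/sops/v3 and gopkg.in/yaml.v3 in place of go.mozilla.org/sops and yaml.v2, downstream code imports the new module path directly. A minimal sketch of the decrypt helper as consumed through the vendored v3 module; the file name is an invented example:

```
package main

import (
	"fmt"
	"log"

	"github.com/getsops/sops/v3/decrypt"
)

func main() {
	// Decrypt a sops-encrypted YAML file using whatever key sources its
	// metadata references (AWS KMS, GCP KMS, Azure Key Vault, age, PGP).
	// "autograph.app.yaml" is a placeholder path.
	cleartext, err := decrypt.File("autograph.app.yaml", "yaml")
	if err != nil {
		log.Fatalf("decrypt: %v", err)
	}
	fmt.Printf("%d bytes of configuration decrypted\n", len(cleartext))
}
```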